From 37fac38fe3dd81c1d6d2ec3fea7dac3f3d75b299 Mon Sep 17 00:00:00 2001
From: Salvatore Daniele
Date: Tue, 4 Jun 2024 16:43:49 -0400
Subject: [PATCH 1/3] Update machine config operator

Update the machine config operator dependency to remove the transitive dependency on goproxy.

Signed-off-by: Salvatore Daniele
---
 go.mod | 31 +++++++++++--------
 go.sum | 97 ++++++++++++++++++++++++++++++----------------------------
 2 files changed, 69 insertions(+), 59 deletions(-)

diff --git a/go.mod b/go.mod index fca66390c..572df7445 100644 --- a/go.mod +++ b/go.mod @@ -18,9 +18,9 @@ require ( github.com/onsi/ginkgo/v2 v2.11.0 github.com/onsi/gomega v1.27.10 github.com/openshift-kni/k8sreporter v1.0.4 - github.com/openshift/api v0.0.0-20221220162201-efeef9d83325 - github.com/openshift/client-go v0.0.0-20220831193253-4950ae70c8ea - github.com/openshift/machine-config-operator v0.0.1-0.20230118083703-fc27a2bdaa85 + github.com/openshift/api v0.0.0-20230807132801-600991d550ac + github.com/openshift/client-go v0.0.0-20230607134213-3cd0021bbee3 + github.com/openshift/machine-config-operator v0.0.1-0.20231024085435-7e1fb719c1ba github.com/pkg/errors v0.9.1 github.com/safchain/ethtool v0.3.0 github.com/spf13/cobra v1.7.0 @@ -47,24 +47,27 @@ require ( github.com/Masterminds/semver/v3 v3.1.1 // indirect github.com/Mellanox/sriovnet v1.0.3 // indirect github.com/StackExchange/wmi v1.2.1 // indirect - github.com/ajeddeloh/go-json v0.0.0-20170920214419-6a2fe990e083 // indirect + github.com/ajeddeloh/go-json v0.0.0-20200220154158-5ae607161559 // indirect + github.com/aws/aws-sdk-go v1.44.204 // indirect github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chai2010/gettext-go v1.0.2 // indirect - github.com/clarketm/json v1.14.1 // indirect + github.com/clarketm/json v1.17.1 // indirect github.com/coreos/fcct v0.5.0 // indirect - github.com/coreos/go-json v0.0.0-20211020211907-c63f628265de // indirect + github.com/coreos/go-json v0.0.0-20230131223807-18775e0fb4fb // indirect github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect - github.com/coreos/ign-converter v0.0.0-20201123214124-8dac862888aa // indirect + github.com/coreos/ign-converter v0.0.0-20230417193809-cee89ea7d8ff // indirect github.com/coreos/ignition v0.35.0 // indirect - github.com/coreos/ignition/v2 v2.14.0 // indirect - github.com/coreos/vcontext v0.0.0-20211021162308-f1dbbca7bef4 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/coreos/ignition/v2 v2.15.0 // indirect + github.com/coreos/vcontext v0.0.0-20230201181013-d72178a18687 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.7.0 // indirect github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect + github.com/frankban/quicktest v1.14.4 // indirect github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 // indirect github.com/go-errors/errors v1.4.2 // indirect github.com/go-logr/zapr v1.2.4 // indirect @@ -95,7 +98,6 @@ require ( github.com/k8snetworkplumbingwg/govdpa v0.1.4 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-isatty v0.0.17 // indirect github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect
github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect @@ -107,12 +109,14 @@ require ( github.com/modern-go/reflect2 v1.0.2 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/openshift/library-go v0.0.0-20231020125025-211b32f1a1f2 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_golang v1.17.0 // indirect github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/common v0.45.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect + github.com/robfig/cron v1.2.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shopspring/decimal v1.2.0 // indirect github.com/spf13/afero v1.9.3 // indirect @@ -142,13 +146,16 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect howett.net/plist v1.0.0 // indirect + k8s.io/apiserver v0.28.3 // indirect k8s.io/cli-runtime v0.28.3 // indirect k8s.io/component-base v0.28.3 // indirect k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 // indirect k8s.io/klog/v2 v2.100.1 // indirect + k8s.io/kube-aggregator v0.27.4 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect - k8s.io/kubelet v0.25.1 // indirect + k8s.io/kubelet v0.27.7 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 // indirect sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.3.0 // indirect diff --git a/go.sum b/go.sum index 30f10a74f..15dab0f4c 100644 --- a/go.sum +++ b/go.sum @@ -12,7 +12,6 @@ cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6 cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.58.0/go.mod h1:W+9FnSUw6nhVwXlFcp1eL+krq5+HQUJeUogSeJZZiWg= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= @@ -34,7 +33,6 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.9.0/go.mod h1:m+/etGaqZbylxaNT876QGXqEHp4PR2Rq5GMqICWb9bU= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= @@ -54,14 +52,16 @@ github.com/Mellanox/sriovnet v1.0.3 h1:Nlmxr2mkp16aIP4CJcsnqCczxQQgOuzNDm/nu9qTB github.com/Mellanox/sriovnet v1.0.3/go.mod 
h1:8TlYc3iOTEvUM+WAbC7MU6U6JXqIfhl2DcWIGVUsjIE= github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA= github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= -github.com/ajeddeloh/go-json v0.0.0-20170920214419-6a2fe990e083 h1:uwcvnXW76Y0rHM+qs7y8iHknWUWXYFNlD6FEVhc47TU= github.com/ajeddeloh/go-json v0.0.0-20170920214419-6a2fe990e083/go.mod h1:otnto4/Icqn88WCcM4bhIJNSgsh9VLBuspyyCfvof9c= +github.com/ajeddeloh/go-json v0.0.0-20200220154158-5ae607161559 h1:4SPQljF/GJ8Q+QlCWMWxRBepub4DresnOm4eI2ebFGc= +github.com/ajeddeloh/go-json v0.0.0-20200220154158-5ae607161559/go.mod h1:otnto4/Icqn88WCcM4bhIJNSgsh9VLBuspyyCfvof9c= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/ashcrow/osrelease v0.0.0-20180626175927-9b292693c55c h1:icme0QhxrgZOxTBnT6K8dfGLwbKWSOVwPB95XTbo8Ws= +github.com/ashcrow/osrelease v0.0.0-20180626175927-9b292693c55c/go.mod h1:BRljTyotlu+6N+Qlu5MhjxpdmccCnp9lDvZjNNV8qr4= github.com/aws/aws-sdk-go v1.19.11/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.30.28 h1:SaPM7dlmp7h3Lj1nJ4jdzOkTdom08+g20k7AU5heZYg= -github.com/aws/aws-sdk-go v1.30.28/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/beevik/etree v1.1.1-0.20200718192613-4a2f8b9d084c/go.mod h1:0yGO2rna3S9DkITDWHY1bMtcY4IJ4w+4S+EooZUR0bE= +github.com/aws/aws-sdk-go v1.44.204 h1:7/tPUXfNOHB390A63t6fJIwmlwVQAkAwcbzKsU2/6OQ= +github.com/aws/aws-sdk-go v1.44.204/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -69,6 +69,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -79,16 +81,16 @@ github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHe github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/clarketm/json v1.14.1 h1:43bkbTTKKdDx7crs3WHzkrnH6S1EvAF1VZrdFGMmmz4= -github.com/clarketm/json v1.14.1/go.mod h1:ynr2LRfb0fQU34l07csRNBTcivjySLLiY1YzQqKVfdo= +github.com/clarketm/json v1.17.1 h1:U1IxjqJkJ7bRK4L6dyphmoO840P6bdhPdbbLySourqI= +github.com/clarketm/json v1.17.1/go.mod 
h1:ynr2LRfb0fQU34l07csRNBTcivjySLLiY1YzQqKVfdo= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/coreos/fcct v0.5.0 h1:f/z+MCoR2vULes+MyoPEApQ6iluy/JbXoRi6dahPItQ= github.com/coreos/fcct v0.5.0/go.mod h1:cbE+j77YSQwFB2fozWVB3qsI2Pi3YiVEbDz/b6Yywdo= -github.com/coreos/go-json v0.0.0-20211020211907-c63f628265de h1:qZvNu52Tv7Jfbgxdw3ONHf0BK9UpuSxi9FA9Y+qU5VU= -github.com/coreos/go-json v0.0.0-20211020211907-c63f628265de/go.mod h1:lryFBkhadOfv8Jue2Vr/f/Yviw8h1DQPQojbXqEChY0= +github.com/coreos/go-json v0.0.0-20230131223807-18775e0fb4fb h1:rmqyI19j3Z/74bIRhuC59RB442rXUazKNueVpfJPxg4= +github.com/coreos/go-json v0.0.0-20230131223807-18775e0fb4fb/go.mod h1:rcFZM3uxVvdyNmsAV2jopgPD1cs5SPWJWU5dOz2LUnw= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= @@ -96,28 +98,27 @@ github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142/go.mod h1:F5haX7 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/ign-converter v0.0.0-20201123214124-8dac862888aa h1:oIF6XCee+GoGNTykmC2pvx3A3d5ES/CHAB2H0beGji0= -github.com/coreos/ign-converter v0.0.0-20201123214124-8dac862888aa/go.mod h1:pqAsDWa5YDi10Va/aqQI0bwOs9hXqoE2xwb5vnFys5s= +github.com/coreos/ign-converter v0.0.0-20230417193809-cee89ea7d8ff h1:2ZvV3ZMt287w2lp/2H3XS3v7lwuxD5Y3PB7sJIcW8q8= +github.com/coreos/ign-converter v0.0.0-20230417193809-cee89ea7d8ff/go.mod h1:y5rWUGlcF7G4X20lKcfmolFY5THaxgT+ZcNcn+vqIn0= github.com/coreos/ignition v0.35.0 h1:UFodoYq1mOPrbEjtxIsZbThcDyQwAI1owczRDqWmKkQ= github.com/coreos/ignition v0.35.0/go.mod h1:WJQapxzEn9DE0ryxsGvm8QnBajm/XsS/PkrDqSpz+bA= github.com/coreos/ignition/v2 v2.1.1/go.mod h1:RqmqU64zxarUJa3l4cHtbhcSwfQLpUhv0WVziZwoXvE= -github.com/coreos/ignition/v2 v2.7.0/go.mod h1:3CjaRpg51hmJzPjarbzB0RvSZbLkNOczxKJobTl6nOY= -github.com/coreos/ignition/v2 v2.14.0 h1:KfkCCnA6AK0kts/1zxzzNH5lDMCQN9sqqGcGs+RJVX4= -github.com/coreos/ignition/v2 v2.14.0/go.mod h1:wxc4qdYEIHLygzWbVVEuoD7lQGTZmMgX0VjAPYBbeEQ= +github.com/coreos/ignition/v2 v2.15.0 h1:v2fQ6QvkcAF+La5PHHpnpBS1eGZo+LYL1wTOPvDKAcs= +github.com/coreos/ignition/v2 v2.15.0/go.mod h1:+7BiKurzCFg3P427Ml0wqnKzIuhLimnil6LhFV2DkJM= github.com/coreos/vcontext v0.0.0-20190529201340-22b159166068/go.mod h1:E+6hug9bFSe0KZ2ZAzr8M9F5JlArJjv5D1JS7KSkPKE= github.com/coreos/vcontext v0.0.0-20191017033345-260217907eb5/go.mod h1:E+6hug9bFSe0KZ2ZAzr8M9F5JlArJjv5D1JS7KSkPKE= -github.com/coreos/vcontext 
v0.0.0-20211021162308-f1dbbca7bef4 h1:pfSsrvbjUFGINaPGy0mm2QKQKTdq7IcbUa+nQwsz2UM= -github.com/coreos/vcontext v0.0.0-20211021162308-f1dbbca7bef4/go.mod h1:HckqHnP/HI41vS0bfVjJ20u6jD0biI5+68QwZm5Xb9U= +github.com/coreos/vcontext v0.0.0-20230201181013-d72178a18687 h1:uSmlDgJGbUB0bwQBcZomBTottKwEDF5fF8UjSwKSzWM= +github.com/coreos/vcontext v0.0.0-20230201181013-d72178a18687/go.mod h1:Salmysdw7DAVuobBW/LwsKKgpyCPHUhjyJoMJD+ZJiI= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -133,10 +134,10 @@ github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= -github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -164,11 +165,9 @@ github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig 
v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/godbus/dbus v0.0.0-20181025153459-66d97aec3384/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= @@ -238,7 +237,6 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= @@ -286,7 +284,8 @@ github.com/jaypipes/pcidb v1.0.0 h1:vtZIfkiCUE42oYbJS0TAq9XSfSmcsgo9IdxSm9qzYU8= github.com/jaypipes/pcidb v1.0.0/go.mod h1:TnYUvqhPBzCKnH34KrIX22kAeEbDCSRJ9cqLRCuNDfk= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -359,15 +358,18 @@ github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= github.com/openshift-kni/k8sreporter v1.0.4 h1:jEwX6Pqei60kO1U0JLo+ePjQaP7DNn/M6d63KCS2tS0= github.com/openshift-kni/k8sreporter v1.0.4/go.mod h1:fg8HI9yxiKAi6UzR6NTtrmQmA2WKzUqmkRUHwQ1+Bj8= -github.com/openshift/api v0.0.0-20221220162201-efeef9d83325 h1:tUmCk1IW44nT8YjgNCFa6r8lq/jlRrsfb8PLcFEsyb8= -github.com/openshift/api v0.0.0-20221220162201-efeef9d83325/go.mod h1:OW9hi5XDXOQWm/kRqUww6RVxZSf0nqrS4heerSmHBC4= -github.com/openshift/client-go v0.0.0-20220831193253-4950ae70c8ea h1:7JbjIzWt3Q75ErY1PAZ+gCA+bErI6HSlpffHFmMMzqM= -github.com/openshift/client-go v0.0.0-20220831193253-4950ae70c8ea/go.mod h1:+J8DqZC60acCdpYkwVy/KH4cudgWiFZRNOBeghCzdGA= -github.com/openshift/machine-config-operator v0.0.1-0.20230118083703-fc27a2bdaa85 h1:iJ6S94vL43WjldQ+6VexZUUbDwymRW38ATCkCKEL3nw= -github.com/openshift/machine-config-operator v0.0.1-0.20230118083703-fc27a2bdaa85/go.mod h1:RP4FG/4aZv2c3QzhOZQn0im1OZqSzicg3aaFqKU9IIA= +github.com/openshift/api 
v0.0.0-20230807132801-600991d550ac h1:HqT8MmYGXiUGUW0BjygTGOOvqO2wIsTaG3q8nboJyPY= +github.com/openshift/api v0.0.0-20230807132801-600991d550ac/go.mod h1:yimSGmjsI+XF1mr+AKBs2//fSXIOhhetHGbMlBEfXbs= +github.com/openshift/client-go v0.0.0-20230607134213-3cd0021bbee3 h1:uVCq/Sx2y4UZh+qCsCL1BBUJpc3DULHkN4j7XHHgHtw= +github.com/openshift/client-go v0.0.0-20230607134213-3cd0021bbee3/go.mod h1:M+VUIcqx5IvgzejcbgmQnxETPrXRYlcufHpw2bAgz9Y= +github.com/openshift/library-go v0.0.0-20231020125025-211b32f1a1f2 h1:TWG/YVRhSvjYq8iIwJ2Wpoopgg0zuh+ZAl1RSm4J8Z0= +github.com/openshift/library-go v0.0.0-20231020125025-211b32f1a1f2/go.mod h1:ZFwNwC3opc/7aOvzUbU95zp33Lbxet48h80ryH3p6DY= +github.com/openshift/machine-config-operator v0.0.1-0.20231024085435-7e1fb719c1ba h1:WM6K+m2xMAwbQDetKGhV/Rd8yukF3AsU1z74cqoWrz0= +github.com/openshift/machine-config-operator v0.0.1-0.20231024085435-7e1fb719c1ba/go.mod h1:mSt3ACow31pa1hTRONn+yT5e+KFkgi7G2bFEx5Nj+n0= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pin/tftp v2.1.0+incompatible/go.mod h1:xVpZOMCXTy+A5QMjEVN0Glwa1sUvaJhFXbr/aAxuxGY= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -384,7 +386,10 @@ github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lne github.com/prometheus/common v0.45.0/go.mod h1:YJmSTw9BoKxJplESWWxlbyttQR4uaEcGyv9MZjVOJsY= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= +github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= @@ -435,7 +440,6 @@ github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1 github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= -github.com/vmware/vmw-guestinfo v0.0.0-20220317130741-510905f0efa3/go.mod h1:CSBTxrhePCm0cmXNKDGeu+6bOQzpaEklfCqEpn89JWk= github.com/vmware/vmw-ovflib v0.0.0-20170608004843-1f217b9dc714/go.mod h1:jiPk45kn7klhByRvUq5i2vo1RtHKBHj+iWGFpxbXuuI= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= @@ -540,7 +544,6 @@ golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net 
v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= @@ -551,6 +554,7 @@ golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -607,7 +611,6 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200610111108-226ff32320da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -626,7 +629,7 @@ golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= @@ -634,6 +637,7 @@ golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term 
v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -644,6 +648,7 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -688,9 +693,6 @@ golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200601175630-2caf76543d99/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200606014950-c42cb6316fb6/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200610160956-3e83d1e96d0e/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= @@ -727,7 +729,6 @@ google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.26.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= @@ -768,9 +769,6 @@ google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200603110839-e855014d5736/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200608115520-7c474a2e3482/go.mod 
h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200610104632-a5b850bcf112/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -835,7 +833,6 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20190502103701-55513cacd4ae/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20191010095647-fc94e3f71652/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -853,6 +850,8 @@ k8s.io/apiextensions-apiserver v0.28.3 h1:Od7DEnhXHnHPZG+W9I97/fSQkVpVPQx2diy+2E k8s.io/apiextensions-apiserver v0.28.3/go.mod h1:NE1XJZ4On0hS11aWWJUTNkmVB03j9LM7gJSisbRt8Lc= k8s.io/apimachinery v0.28.3 h1:B1wYx8txOaCQG0HmYF6nbpU8dg6HvA06x5tEffvOe7A= k8s.io/apimachinery v0.28.3/go.mod h1:uQTKmIqs+rAYaq+DFaoD2X7pcjLOqbQX2AOiO0nIpb8= +k8s.io/apiserver v0.28.3 h1:8Ov47O1cMyeDzTXz0rwcfIIGAP/dP7L8rWbEljRcg5w= +k8s.io/apiserver v0.28.3/go.mod h1:YIpM+9wngNAv8Ctt0rHG4vQuX/I5rvkEMtZtsxW2rNM= k8s.io/cli-runtime v0.28.3 h1:lvuJYVkwCqHEvpS6KuTZsUVwPePFjBfSGvuaLl2SxzA= k8s.io/cli-runtime v0.28.3/go.mod h1:jeX37ZPjIcENVuXDDTskG3+FnVuZms5D9omDXS/2Jjc= k8s.io/client-go v0.28.3 h1:2OqNb72ZuTZPKCl+4gTKvqao0AMOl9f3o2ijbAj3LI4= @@ -866,12 +865,14 @@ k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAE k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-aggregator v0.27.4 h1:WdK9iiBr32G8bWfpUEFVQl70RZO2dU19ZAktUXL5JFc= +k8s.io/kube-aggregator v0.27.4/go.mod h1:+eG83gkAyh0uilQEAOgheeQW4hr+PkyV+5O1nLGsjlM= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/kubectl v0.28.3 h1:H1Peu1O3EbN9zHkJCcvhiJ4NUj6lb88sGPO5wrWIM6k= k8s.io/kubectl v0.28.3/go.mod h1:RDAudrth/2wQ3Sg46fbKKl4/g+XImzvbsSRZdP2RiyE= -k8s.io/kubelet v0.25.1 h1:FBGOmIM4qR4Ov+RU90VXnxO/hvvniUMTGUriVOa9FfY= -k8s.io/kubelet v0.25.1/go.mod h1:mXo8HjxCrwVduGBk4tzuhegJYPvNwPyycRf39H4KKqE= +k8s.io/kubelet v0.27.7 h1:DiptBLFbl6nyadTP9DUfhiReasBDV1qyE1r8h2o5mXc= +k8s.io/kubelet v0.27.7/go.mod h1:WKoEgiCa6/hzmgN4UgVioEwcpLC8wg+9Xzzc8fqOCYs= k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= @@ -881,6 +882,8 @@ sigs.k8s.io/controller-runtime v0.16.3 
h1:2TuvuokmfXvDUamSx1SuAOO3eTyye+47mJCigw sigs.k8s.io/controller-runtime v0.16.3/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 h1:PFWFSkpArPNJxFX4ZKWAk9NSeRoZaXschn+ULa4xVek= +sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96/go.mod h1:EOBQyBowOUsd7U4CJnMHNE0ri+zCXyouGdLwC/jZU+I= sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY= sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM7vh3b7HvGNfXrJ/xL6BDMS0v1V/HHg5U=

From be64d74755ad9873e6e6bc80369ae43c7d0958f3 Mon Sep 17 00:00:00 2001
From: Salvatore Daniele
Date: Tue, 4 Jun 2024 16:44:17 -0400
Subject: [PATCH 2/3] go mod vendor

Signed-off-by: Salvatore Daniele
---
 vendor/github.com/ajeddeloh/go-json/OWNERS | 19 + vendor/github.com/aws/aws-sdk-go/LICENSE.txt | 202 + vendor/github.com/aws/aws-sdk-go/NOTICE.txt | 3 + .../github.com/aws/aws-sdk-go/aws/arn/arn.go | 93 + vendor/github.com/blang/semver/v4/LICENSE | 22 + vendor/github.com/blang/semver/v4/json.go | 23 + vendor/github.com/blang/semver/v4/range.go | 416 + vendor/github.com/blang/semver/v4/semver.go | 476 + vendor/github.com/blang/semver/v4/sort.go | 28 + vendor/github.com/blang/semver/v4/sql.go | 30 + vendor/github.com/clarketm/json/decode.go | 120 +- vendor/github.com/clarketm/json/encode.go | 114 +- vendor/github.com/clarketm/json/fuzz.go | 1 + vendor/github.com/clarketm/json/scanner.go | 31 +- vendor/github.com/coreos/go-json/README.md | 18 +- vendor/github.com/coreos/go-json/decode.go | 99 +- vendor/github.com/coreos/go-json/encode.go | 104 +- vendor/github.com/coreos/go-json/fuzz.go | 10 +- vendor/github.com/coreos/go-json/scanner.go | 2 +- vendor/github.com/coreos/go-json/stream.go | 8 +- vendor/github.com/coreos/go-json/tags.go | 16 +- .../translate/v23tov30/v23tov30.go | 48 +- .../translate/v32tov31/v32tov31.go | 3 +- .../translate/v33tov32/v33tov32.go | 133 + .../translate/v34tov33/v34tov33.go | 192 + .../v2/config/shared/errors/errors.go | 11 + .../ignition/v2/config/shared/parse/unit.go | 37 + .../coreos/ignition/v2/config/util/config.go | 1 + .../ignition/v2/config/v3_0/types/config.go | 38 + .../ignition/v2/config/v3_0/types/storage.go | 42 +- .../ignition/v2/config/v3_0/types/systemd.go | 61 + .../ignition/v2/config/v3_0/types/unit.go | 20 +- .../ignition/v2/config/v3_1/types/config.go | 38 + .../ignition/v2/config/v3_1/types/storage.go | 42 +- .../ignition/v2/config/v3_1/types/systemd.go | 61 + .../ignition/v2/config/v3_1/types/unit.go | 20 +- .../ignition/v2/config/v3_2/types/config.go | 38 + .../ignition/v2/config/v3_2/types/storage.go | 42 +- .../ignition/v2/config/v3_2/types/systemd.go | 61 + .../ignition/v2/config/v3_2/types/unit.go | 20 +- .../coreos/ignition/v2/config/v3_3/config.go | 78 + .../v2/config/v3_3/translate/translate.go | 95 + .../ignition/v2/config/v3_3/types/clevis.go | 49 + .../ignition/v2/config/v3_3/types/config.go | 64 + .../ignition/v2/config/v3_3/types/device.go | 25 + .../v2/config/v3_3/types/directory.go | 27 + .../ignition/v2/config/v3_3/types/disk.go | 135 + .../ignition/v2/config/v3_3/types/file.go | 44 +
.../v2/config/v3_3/types/filesystem.go | 106 + .../ignition/v2/config/v3_3/types/headers.go | 65 + .../ignition/v2/config/v3_3/types/ignition.go | 49 + .../ignition/v2/config/v3_3/types/kargs.go | 22 + .../ignition/v2/config/v3_3/types/luks.go | 71 + .../ignition/v2/config/v3_3/types/mode.go | 36 + .../ignition/v2/config/v3_3/types/node.go | 59 + .../v2/config/v3_3/types/partition.go | 91 + .../ignition/v2/config/v3_3/types/passwd.go | 23 + .../ignition/v2/config/v3_3/types/path.go | 42 + .../ignition/v2/config/v3_3/types/proxy.go | 49 + .../ignition/v2/config/v3_3/types/raid.go | 62 + .../ignition/v2/config/v3_3/types/resource.go | 91 + .../ignition/v2/config/v3_3/types/schema.go | 254 + .../ignition/v2/config/v3_3/types/storage.go | 115 + .../ignition/v2/config/v3_3/types/systemd.go | 61 + .../ignition/v2/config/v3_3/types/tang.go | 51 + .../ignition/v2/config/v3_3/types/tls.go | 27 + .../ignition/v2/config/v3_3/types/unit.go | 68 + .../ignition/v2/config/v3_3/types/url.go | 57 + .../v2/config/v3_3/types/verification.go | 71 + .../coreos/ignition/v2/config/v3_4/config.go | 78 + .../v2/config/v3_4/translate/translate.go | 85 + .../ignition/v2/config/v3_4/types/clevis.go | 49 + .../ignition/v2/config/v3_4/types/config.go | 64 + .../ignition/v2/config/v3_4/types/device.go | 25 + .../v2/config/v3_4/types/directory.go | 26 + .../ignition/v2/config/v3_4/types/disk.go | 135 + .../ignition/v2/config/v3_4/types/file.go | 43 + .../v2/config/v3_4/types/filesystem.go | 106 + .../ignition/v2/config/v3_4/types/headers.go | 65 + .../ignition/v2/config/v3_4/types/ignition.go | 49 + .../ignition/v2/config/v3_4/types/kargs.go | 22 + .../ignition/v2/config/v3_4/types/luks.go | 71 + .../ignition/v2/config/v3_4/types/mode.go | 26 + .../ignition/v2/config/v3_4/types/node.go | 59 + .../v2/config/v3_4/types/partition.go | 91 + .../ignition/v2/config/v3_4/types/passwd.go | 23 + .../ignition/v2/config/v3_4/types/path.go | 42 + .../ignition/v2/config/v3_4/types/proxy.go | 49 + .../ignition/v2/config/v3_4/types/raid.go | 62 + .../ignition/v2/config/v3_4/types/resource.go | 91 + .../ignition/v2/config/v3_4/types/schema.go | 259 + .../ignition/v2/config/v3_4/types/storage.go | 115 + .../ignition/v2/config/v3_4/types/systemd.go | 61 + .../ignition/v2/config/v3_4/types/tang.go | 65 + .../ignition/v2/config/v3_4/types/tls.go | 27 + .../ignition/v2/config/v3_4/types/unit.go | 68 + .../ignition/v2/config/v3_4/types/url.go | 83 + .../v2/config/v3_4/types/verification.go | 71 + .../openshift/api/.ci-operator.yaml | 4 + .../github.com/openshift/api/.gitattributes | 7 + vendor/github.com/openshift/api/.gitignore | 19 + vendor/github.com/openshift/api/Makefile | 123 + vendor/github.com/openshift/api/OWNERS | 29 + vendor/github.com/openshift/api/README.md | 88 + .../openshift/api/apiserver/.codegen.yaml | 2 + .../openshift/api/apiserver/install.go | 22 + .../openshift/api/apiserver/v1/Makefile | 3 + ...piserver.openshift.io_apirequestcount.yaml | 254 + .../openshift/api/apiserver/v1/doc.go | 8 + .../openshift/api/apiserver/v1/register.go | 38 + .../v1/stable.apirequestcount.testsuite.yaml | 15 + .../api/apiserver/v1/types_apirequestcount.go | 171 + .../api/apiserver/v1/zz_generated.deepcopy.go | 202 + .../v1/zz_generated.swagger_doc_generated.go | 97 + vendor/github.com/openshift/api/apps/OWNERS | 3 + .../github.com/openshift/api/apps/install.go | 26 + .../openshift/api/apps/v1/consts.go | 108 + .../api/apps/v1/deprecated_consts.go | 38 + .../github.com/openshift/api/apps/v1/doc.go | 9 + 
.../openshift/api/apps/v1/generated.pb.go | 7461 +++++++ .../openshift/api/apps/v1/generated.proto | 490 + .../openshift/api/apps/v1/legacy.go | 28 + .../openshift/api/apps/v1/register.go | 45 + .../github.com/openshift/api/apps/v1/types.go | 537 + .../api/apps/v1/zz_generated.deepcopy.go | 682 + .../v1/zz_generated.swagger_doc_generated.go | 284 + .../v1/zz_prerelease_lifecycle_generated.go | 114 + .../openshift/api/authorization/install.go | 26 + ...enshift_01_rolebindingrestriction.crd.yaml | 158 + .../openshift/api/authorization/v1/Makefile | 3 + .../openshift/api/authorization/v1/codec.go | 139 + .../openshift/api/authorization/v1/doc.go | 9 + .../api/authorization/v1/generated.pb.go | 8812 ++++++++ .../api/authorization/v1/generated.proto | 557 + .../openshift/api/authorization/v1/legacy.go | 43 + .../api/authorization/v1/register.go | 60 + ...able.rolebindingrestriction.testsuite.yaml | 14 + .../openshift/api/authorization/v1/types.go | 632 + .../authorization/v1/zz_generated.deepcopy.go | 994 + .../v1/zz_generated.swagger_doc_generated.go | 364 + vendor/github.com/openshift/api/build/OWNERS | 4 + .../github.com/openshift/api/build/install.go | 26 + .../openshift/api/build/v1/consts.go | 200 + .../github.com/openshift/api/build/v1/doc.go | 8 + .../openshift/api/build/v1/generated.pb.go | 17545 ++++++++++++++++ .../openshift/api/build/v1/generated.proto | 1239 ++ .../openshift/api/build/v1/legacy.go | 28 + .../openshift/api/build/v1/register.go | 47 + .../openshift/api/build/v1/types.go | 1469 ++ .../api/build/v1/zz_generated.deepcopy.go | 1610 ++ .../v1/zz_generated.swagger_doc_generated.go | 692 + .../openshift/api/cloudnetwork/OWNERS | 6 + .../openshift/api/cloudnetwork/install.go | 26 + .../v1/001-cloudprivateipconfig.crd.yaml | 107 + .../001-cloudprivateipconfig.crd.yaml-patch | 10 + .../openshift/api/cloudnetwork/v1/Makefile | 3 + .../openshift/api/cloudnetwork/v1/doc.go | 5 + .../api/cloudnetwork/v1/generated.pb.go | 1045 + .../api/cloudnetwork/v1/generated.proto | 87 + .../openshift/api/cloudnetwork/v1/register.go | 37 + ...stable.cloudprivateipconfig.testsuite.yaml | 18 + .../openshift/api/cloudnetwork/v1/types.go | 91 + .../cloudnetwork/v1/zz_generated.deepcopy.go | 111 + .../v1/zz_generated.swagger_doc_generated.go | 54 + .../openshift/api/config/.codegen.yaml | 8 + .../openshift/api/config/install.go | 27 + ...ersion-operator_01_clusterversion.crd.yaml | 14 + ...ator_01_apiserver-CustomNoUpgrade.crd.yaml | 179 + ...ig-operator_01_apiserver-Default.crd.yaml} | 2 + ...01_apiserver-TechPreviewNoUpgrade.crd.yaml | 179 + .../0000_10_config-operator_01_build.crd.yaml | 17 +- ...g-operator_01_dns-CustomNoUpgrade.crd.yaml | 114 + ...0_config-operator_01_dns-Default.crd.yaml} | 42 + ...rator_01_dns-TechPreviewNoUpgrade.crd.yaml | 114 + ...10_config-operator_01_featuregate.crd.yaml | 90 + ...01_infrastructure-CustomNoUpgrade.crd.yaml | 999 + ...rastructure-CustomNoUpgrade.crd.yaml-patch | 24 + ...perator_01_infrastructure-Default.crd.yaml | 478 +- ...r_01_infrastructure-Default.crd.yaml-patch | 24 + ...frastructure-TechPreviewNoUpgrade.crd.yaml | 232 +- .../config/v1/custom.apiserver.testsuite.yaml | 35 + .../api/config/v1/custom.dns.testsuite.yaml | 104 + .../v1/custom.infrastructure.testsuite.yaml | 321 + .../openshift/api/config/v1/feature_gates.go | 304 + .../config/v1/stable.apiserver.testsuite.yaml | 26 +- .../api/config/v1/stable.dns.testsuite.yaml | 93 +- .../v1/stable.infrastructure.testsuite.yaml | 907 + .../v1/techpreview.apiserver.testsuite.yaml | 35 + 
.../config/v1/techpreview.dns.testsuite.yaml | 14 + .../techpreview.infrastructure.testsuite.yaml | 505 + .../api/config/v1/types_apiserver.go | 14 +- .../api/config/v1/types_authentication.go | 8 +- .../openshift/api/config/v1/types_build.go | 8 +- .../api/config/v1/types_cluster_operator.go | 8 +- .../api/config/v1/types_cluster_version.go | 95 +- .../openshift/api/config/v1/types_console.go | 8 +- .../openshift/api/config/v1/types_dns.go | 45 +- .../openshift/api/config/v1/types_feature.go | 171 +- .../openshift/api/config/v1/types_image.go | 8 +- .../config/v1/types_image_content_policy.go | 8 +- .../v1/types_image_digest_mirror_set.go | 8 +- .../config/v1/types_image_tag_mirror_set.go | 8 +- .../api/config/v1/types_infrastructure.go | 383 +- .../openshift/api/config/v1/types_ingress.go | 8 +- .../openshift/api/config/v1/types_network.go | 8 +- .../openshift/api/config/v1/types_node.go | 8 +- .../openshift/api/config/v1/types_oauth.go | 19 +- .../api/config/v1/types_operatorhub.go | 8 +- .../openshift/api/config/v1/types_project.go | 8 +- .../openshift/api/config/v1/types_proxy.go | 8 +- .../api/config/v1/types_scheduling.go | 8 +- .../api/config/v1/zz_generated.deepcopy.go | 303 +- .../v1/zz_generated.swagger_doc_generated.go | 359 +- ...or_01_backup-TechPreviewNoUpgrade.crd.yaml | 100 + ...ig-operator_01_insightsdatagather.crd.yaml | 62 + .../openshift/api/config/v1alpha1/Makefile | 3 + .../openshift/api/config/v1alpha1/doc.go | 8 + .../openshift/api/config/v1alpha1/register.go | 40 + .../techpreview.backup.testsuite.yaml | 202 + ...hpreview.insightsdatagather.testsuite.yaml | 14 + .../api/config/v1alpha1/types_backup.go | 168 + .../api/config/v1alpha1/types_insights.go | 82 + .../config/v1alpha1/zz_generated.deepcopy.go | 294 + .../zz_generated.swagger_doc_generated.go | 121 + .../openshift/api/console/.codegen.yaml | 2 + .../github.com/openshift/api/console/OWNERS | 3 + .../openshift/api/console/install.go | 27 + .../console/v1/00_consoleclidownload.crd.yaml | 77 + .../v1/00_consoleexternalloglink.crd.yaml | 68 + .../api/console/v1/00_consolelink.crd.yaml | 125 + .../v1/00_consolenotification.crd.yaml | 84 + .../console/v1/00_consolequickstart.crd.yaml | 165 + .../api/console/v1/00_consolesample.crd.yaml | 167 + .../console/v1/00_consoleyamlsample.crd.yaml | 74 + .../api/console/v1/90_consoleplugin.crd.yaml | 294 + .../openshift/api/console/v1/Makefile | 3 + .../openshift/api/console/v1/doc.go | 7 + .../openshift/api/console/v1/register.go | 53 + .../stable.consoleclidownload.testsuite.yaml | 20 + ...able.consoleexternalloglink.testsuite.yaml | 18 + .../v1/stable.consolelink.testsuite.yaml | 20 + .../stable.consolenotification.testsuite.yaml | 16 + .../v1/stable.consoleplugin.testsuite.yaml | 88 + .../stable.consolequickstart.testsuite.yaml | 28 + .../v1/stable.consolesample.testsuite.yaml | 183 + .../stable.consoleyamlsample.testsuite.yaml | 26 + .../openshift/api/console/v1/types.go | 10 + .../console/v1/types_console_cli_download.go | 54 + .../v1/types_console_external_log_links.go | 65 + .../api/console/v1/types_console_link.go | 94 + .../console/v1/types_console_notification.go | 68 + .../api/console/v1/types_console_plugin.go | 244 + .../console/v1/types_console_quick_start.go | 143 + .../api/console/v1/types_console_sample.go | 266 + .../console/v1/types_console_yaml_sample.go | 67 + .../api/console/v1/zz_generated.deepcopy.go | 1032 + .../v1/zz_generated.swagger_doc_generated.go | 460 + .../v1alpha1/90_consoleplugin.crd.yaml | 294 + 
.../openshift/api/console/v1alpha1/Makefile | 3 + .../openshift/api/console/v1alpha1/doc.go | 6 + .../api/console/v1alpha1/register.go | 39 + .../stable.consoleplugin.testsuite.yaml | 23 + .../openshift/api/console/v1alpha1/types.go | 1 + .../console/v1alpha1/types_console_plugin.go | 174 + .../console/v1alpha1/zz_generated.deepcopy.go | 141 + .../zz_generated.swagger_doc_generated.go | 79 + .../openshift/api/helm/.codegen.yaml | 2 + .../github.com/openshift/api/helm/install.go | 26 + .../0000_10-helm-chart-repository.crd.yaml | 130 + ..._10-project-helm-chart-repository.crd.yaml | 139 + .../openshift/api/helm/v1beta1/Makefile | 3 + .../openshift/api/helm/v1beta1/doc.go | 8 + .../openshift/api/helm/v1beta1/register.go | 40 + .../stable.helmchartrepository.testsuite.yaml | 14 + ....projecthelmchartrepository.testsuite.yaml | 14 + .../v1beta1/types_helm_chart_repository.go | 99 + .../types_project_helm_chart_repository.go | 99 + .../api/helm/v1beta1/zz_generated.deepcopy.go | 227 + .../zz_generated.swagger_doc_generated.go | 107 + .../openshift/api/image/.codegen.yaml | 2 + vendor/github.com/openshift/api/image/OWNERS | 5 + .../openshift/api/image/docker10/doc.go | 4 + .../openshift/api/image/docker10/register.go | 47 + .../api/image/docker10/types_docker.go | 60 + .../image/docker10/zz_generated.deepcopy.go | 114 + .../zz_generated.swagger_doc_generated.go | 30 + .../api/image/dockerpre012/deepcopy.go | 18 + .../openshift/api/image/dockerpre012/doc.go | 4 + .../api/image/dockerpre012/register.go | 46 + .../api/image/dockerpre012/types_docker.go | 140 + .../dockerpre012/zz_generated.deepcopy.go | 217 + .../zz_generated.swagger_doc_generated.go | 55 + .../github.com/openshift/api/image/install.go | 26 + .../openshift/api/image/v1/consts.go | 69 + .../github.com/openshift/api/image/v1/doc.go | 8 + .../openshift/api/image/v1/generated.pb.go | 11572 ++++++++++ .../openshift/api/image/v1/generated.proto | 746 + .../openshift/api/image/v1/legacy.go | 33 + .../openshift/api/image/v1/register.go | 54 + .../openshift/api/image/v1/types.go | 766 + .../api/image/v1/zz_generated.deepcopy.go | 1045 + .../v1/zz_generated.swagger_doc_generated.go | 444 + .../openshift/api/imageregistry/.codegen.yaml | 2 + .../openshift/api/imageregistry/install.go | 26 + .../v1/00_imageregistry.crd.yaml | 1263 ++ .../v1/00_imageregistry.crd.yaml-patch | 13 + .../imageregistry/v1/01_imagepruner.crd.yaml | 644 + .../openshift/api/imageregistry/v1/Makefile | 3 + .../openshift/api/imageregistry/v1/doc.go | 3 + .../api/imageregistry/v1/register.go | 48 + .../v1/stable.config.testsuite.yaml | 18 + .../v1/stable.imagepruner.testsuite.yaml | 15 + .../openshift/api/imageregistry/v1/types.go | 489 + .../api/imageregistry/v1/types_imagepruner.go | 112 + .../imageregistry/v1/zz_generated.deepcopy.go | 637 + .../v1/zz_generated.swagger_doc_generated.go | 311 + vendor/github.com/openshift/api/install.go | 169 + .../api/kubecontrolplane/.codegen.yaml | 2 + .../openshift/api/kubecontrolplane/install.go | 26 + .../openshift/api/kubecontrolplane/v1/doc.go | 7 + .../api/kubecontrolplane/v1/register.go | 38 + .../api/kubecontrolplane/v1/types.go | 219 + .../v1/zz_generated.deepcopy.go | 379 + .../v1/zz_generated.swagger_doc_generated.go | 161 + .../openshift/api/legacyconfig/v1/doc.go | 7 + .../openshift/api/legacyconfig/v1/register.go | 46 + .../api/legacyconfig/v1/serialization.go | 87 + .../api/legacyconfig/v1/stringsource.go | 31 + .../openshift/api/legacyconfig/v1/types.go | 1599 ++ .../legacyconfig/v1/zz_generated.deepcopy.go | 2143 ++ 
 .../v1/zz_generated.swagger_doc_generated.go | 977 +
 .../openshift/api/machine/.codegen.yaml | 2 +
 .../github.com/openshift/api/machine/OWNERS | 4 +
 .../openshift/api/machine/install.go | 32 +
 .../0000_10_controlplanemachineset.crd.yaml | 564 +
 .../openshift/api/machine/v1/Makefile | 3 +
 .../openshift/api/machine/v1/common.go | 13 +
 .../openshift/api/machine/v1/doc.go | 7 +
 .../openshift/api/machine/v1/register.go | 40 +
 ....controlplanemachineset.aws.testsuite.yaml | 368 +
 ...ontrolplanemachineset.azure.testsuite.yaml | 74 +
 ....controlplanemachineset.gcp.testsuite.yaml | 74 +
 ...olplanemachineset.openstack.testsuite.yaml | 632 +
 ...able.controlplanemachineset.testsuite.yaml | 488 +
 .../api/machine/v1/types_alibabaprovider.go | 374 +
 .../openshift/api/machine/v1/types_aws.go | 49 +
 .../v1/types_controlplanemachineset.go | 420 +
 .../api/machine/v1/types_nutanixprovider.go | 169 +
 .../api/machine/v1/types_powervsprovider.go | 227 +
 .../api/machine/v1/zz_generated.deepcopy.go | 942 +
 .../v1/zz_generated.swagger_doc_generated.go | 411 +
 .../openshift/api/machine/v1alpha1/doc.go | 7 +
 .../api/machine/v1alpha1/register.go | 38 +
 .../api/machine/v1alpha1/types_openstack.go | 368 +
 .../machine/v1alpha1/zz_generated.deepcopy.go | 346 +
 .../zz_generated.swagger_doc_generated.go | 196 +
 .../machine/v1beta1/0000_10_machine.crd.yaml | 329 +
 .../v1beta1/0000_10_machinehealthcheck.yaml | 195 +
 .../v1beta1/0000_10_machineset.crd.yaml | 351 +
 .../openshift/api/machine/v1beta1/Makefile | 3 +
 .../openshift/api/machine/v1beta1/doc.go | 7 +
 .../openshift/api/machine/v1beta1/register.go | 44 +
 .../v1beta1/stable.machine.testsuite.yaml | 14 +
 .../stable.machinehealthcheck.testsuite.yaml | 16 +
 .../v1beta1/stable.machineset.testsuite.yaml | 15 +
 .../api/machine/v1beta1/types_awsprovider.go | 311 +
 .../machine/v1beta1/types_azureprovider.go | 568 +
 .../api/machine/v1beta1/types_gcpprovider.go | 284 +
 .../api/machine/v1beta1/types_machine.go | 388 +
 .../v1beta1/types_machinehealthcheck.go | 142 +
 .../api/machine/v1beta1/types_machineset.go | 145 +
 .../api/machine/v1beta1/types_provider.go | 227 +
 .../machine/v1beta1/types_vsphereprovider.go | 210 +
 .../machine/v1beta1/zz_generated.deepcopy.go | 1861 ++
 .../zz_generated.swagger_doc_generated.go | 808 +
 .../openshift/api/monitoring/.codegen.yaml | 8 +
 .../openshift/api/monitoring/install.go | 26 +
 ...00_50_monitoring_01_alertingrules.crd.yaml | 122 +
 ...monitoring_02_alertrelabelconfigs.crd.yaml | 140 +
 .../api/monitoring/v1alpha1/Makefile | 3 +
 .../openshift/api/monitoring/v1alpha1/doc.go | 6 +
 .../api/monitoring/v1alpha1/register.go | 41 +
 .../techpreview.alertingrule.testsuite.yaml | 24 +
 ...hpreview.alertrelabelconfig.testsuite.yaml | 20 +
 .../api/monitoring/v1alpha1/types.go | 349 +
 .../v1alpha1/zz_generated.deepcopy.go | 314 +
 .../zz_generated.swagger_doc_generated.go | 141 +
 .../github.com/openshift/api/network/OWNERS | 4 +
 .../openshift/api/network/install.go | 26 +
 .../network/v1/001-clusternetwork-crd.yaml | 102 +
 .../api/network/v1/002-hostsubnet-crd.yaml | 88 +
 .../api/network/v1/003-netnamespace-crd.yaml | 66 +
 .../v1/004-egressnetworkpolicy-crd.yaml | 71 +
 .../openshift/api/network/v1/Makefile | 3 +
 .../openshift/api/network/v1/constants.go | 17 +
 .../openshift/api/network/v1/doc.go | 8 +
 .../openshift/api/network/v1/generated.pb.go | 3186 +++
 .../openshift/api/network/v1/generated.proto | 243 +
 .../openshift/api/network/v1/legacy.go | 27 +
 .../openshift/api/network/v1/register.go | 44 +
 .../v1/stable.clusternetwork.testsuite.yaml | 16 +
 .../stable.egressnetworkpolicy.testsuite.yaml | 16 +
 .../v1/stable.hostsubnet.testsuite.yaml | 18 +
 .../v1/stable.netnamespace.testsuite.yaml | 16 +
 .../openshift/api/network/v1/types.go | 300 +
 .../api/network/v1/zz_generated.deepcopy.go | 347 +
 .../v1/zz_generated.swagger_doc_generated.go | 145 +
 .../api/networkoperator/.codegen.yaml | 2 +
 .../openshift/api/networkoperator/OWNERS | 5 +
 .../openshift/api/networkoperator/install.go | 26 +
 .../v1/001-egressrouter.crd.yaml | 208 +
 .../v1/001-egressrouter.crd.yaml-patch | 26 +
 .../openshift/api/networkoperator/v1/Makefile | 3 +
 .../openshift/api/networkoperator/v1/doc.go | 5 +
 .../api/networkoperator/v1/generated.pb.go | 2552 +++
 .../api/networkoperator/v1/generated.proto | 189 +
 .../api/networkoperator/v1/register.go | 25 +
 .../v1/stable.egressrouter.testsuite.yaml | 23 +
 .../networkoperator/v1/types_egressrouter.go | 265 +
 .../v1/zz_generated.deepcopy.go | 224 +
 .../v1/zz_generated.swagger_doc_generated.go | 119 +
 .../openshift/api/oauth/.codegen.yaml | 2 +
 .../github.com/openshift/api/oauth/install.go | 26 +
 .../github.com/openshift/api/oauth/v1/doc.go | 8 +
 .../openshift/api/oauth/v1/generated.pb.go | 4624 ++++
 .../openshift/api/oauth/v1/generated.proto | 321 +
 .../openshift/api/oauth/v1/legacy.go | 30 +
 .../openshift/api/oauth/v1/register.go | 47 +
 .../openshift/api/oauth/v1/types.go | 341 +
 .../api/oauth/v1/zz_generated.deepcopy.go | 447 +
 .../v1/zz_generated.swagger_doc_generated.go | 171 +
 .../api/openshiftcontrolplane/.codegen.yaml | 2 +
 .../api/openshiftcontrolplane/install.go | 26 +
 .../api/openshiftcontrolplane/v1/doc.go | 7 +
 .../api/openshiftcontrolplane/v1/register.go | 40 +
 .../api/openshiftcontrolplane/v1/types.go | 429 +
 .../v1/zz_generated.deepcopy.go | 679 +
 .../v1/zz_generated.swagger_doc_generated.go | 257 +
 .../openshift/api/operator/.codegen.yaml | 8 +
 .../openshift/api/operator/install.go | 27 +
 ...0000_10_config-operator_01_config.crd.yaml | 2 +-
 .../0000_12_etcd-operator_01_config.crd.yaml | 2 +-
 ...kube-apiserver-operator_01_config.crd.yaml | 2 +-
 ...roller-manager-operator_01_config.crd.yaml | 2 +-
 ...kube-scheduler-operator_01_config.crd.yaml | 2 +-
 ...hift-apiserver-operator_01_config.crd.yaml | 2 +-
 ...oud-credential-operator_00_config.crd.yaml | 3 +-
 ...rsion-migrator-operator_00_config.crd.yaml | 2 +-
 ...authentication-operator_01_config.crd.yaml | 2 +-
 ...roller-manager-operator_02_config.crd.yaml | 2 +-
 ...00_50_cluster_storage_operator_01_crd.yaml | 12 +-
 ...ess-operator_00-ingresscontroller.crd.yaml | 160 +-
 ...ghts-operator_00-insightsoperator.crd.yaml | 2 +-
 .../0000_50_service-ca-operator_02_crd.yaml | 2 +-
 ...00_70_cluster-network-operator_01.crd.yaml | 13 +-
 .../v1/0000_70_dns-operator_00.crd.yaml | 12 +
 ...i_snapshot_controller_operator_01_crd.yaml | 2 +-
 ...0_90_cluster_csi_driver_01_config.crd.yaml | 77 +-
 ...luster_csi_driver_01_config.crd.yaml-patch | 1 +
 ....crd.yaml => 00_console-operator.crd.yaml} | 17 +-
 .../operator/v1/stable.console.testsuite.yaml | 2 +-
 .../stable.ingresscontroller.testsuite.yaml | 463 +
 .../operator/v1/stable.storage.testsuite.yaml | 97 +
 .../openshift/api/operator/v1/types.go | 15 +-
 .../api/operator/v1/types_authentication.go | 8 +-
 .../api/operator/v1/types_cloudcredential.go | 8 +-
 .../openshift/api/operator/v1/types_config.go | 8 +-
 .../api/operator/v1/types_console.go | 36 +-
 .../operator/v1/types_csi_cluster_driver.go | 139 +-
 .../api/operator/v1/types_csi_snapshot.go | 11 +-
 .../openshift/api/operator/v1/types_dns.go | 62 +-
 .../openshift/api/operator/v1/types_etcd.go | 8 +-
 .../api/operator/v1/types_ingress.go | 233 +-
 .../api/operator/v1/types_insights.go | 11 +-
 .../api/operator/v1/types_kubeapiserver.go | 8 +-
 .../v1/types_kubecontrollermanager.go | 8 +-
 .../v1/types_kubestorageversionmigrator.go | 8 +-
 .../api/operator/v1/types_network.go | 38 +-
 .../operator/v1/types_openshiftapiserver.go | 8 +-
 .../v1/types_openshiftcontrollermanager.go | 8 +-
 .../api/operator/v1/types_scheduler.go | 8 +-
 .../api/operator/v1/types_serviceca.go | 8 +-
 .../v1/types_servicecatalogapiserver.go | 8 +-
 .../types_servicecatalogcontrollermanager.go | 8 +-
 .../api/operator/v1/types_storage.go | 30 +-
 .../api/operator/v1/zz_generated.deepcopy.go | 216 +
 .../v1/zz_generated.swagger_doc_generated.go | 338 +-
 ...1_etcdbackup-TechPreviewNoUpgrade.crd.yaml | 114 +
 ...g-operator_01_olm-CustomNoUpgrade.crd.yaml | 140 +
 ...rator_01_olm-TechPreviewNoUpgrade.crd.yaml | 140 +
 .../v1alpha1/custom.olm.testsuite.yaml | 28 +
 .../api/operator/v1alpha1/register.go | 4 +
 .../techpreview.etcdbackup.testsuite.yaml | 38 +
 .../v1alpha1/techpreview.olm.testsuite.yaml | 28 +
 .../openshift/api/operator/v1alpha1/types.go | 29 +-
 .../api/operator/v1alpha1/types_etcdbackup.go | 101 +
 .../types_image_content_source_policy.go | 8 +-
 .../api/operator/v1alpha1/types_olm.go | 56 +
 .../v1alpha1/zz_generated.deepcopy.go | 217 +
 .../zz_generated.swagger_doc_generated.go | 90 +-
 .../api/operatorcontrolplane/.codegen.yaml | 2 +
 .../api/operatorcontrolplane/install.go | 26 +
 ...10-pod-network-connectivity-check.crd.yaml | 227 +
 .../operatorcontrolplane/v1alpha1/Makefile | 3 +
 .../api/operatorcontrolplane/v1alpha1/doc.go | 8 +
 .../operatorcontrolplane/v1alpha1/register.go | 39 +
 ...podnetworkconnectivitycheck.testsuite.yaml | 18 +
 .../v1alpha1/types_conditioncheck.go | 193 +
 .../v1alpha1/zz_generated.deepcopy.go | 199 +
 .../zz_generated.swagger_doc_generated.go | 95 +
 .../github.com/openshift/api/osin/install.go | 26 +
 .../github.com/openshift/api/osin/v1/doc.go | 7 +
 .../openshift/api/osin/v1/register.go | 50 +
 .../github.com/openshift/api/osin/v1/types.go | 488 +
 .../api/osin/v1/zz_generated.deepcopy.go | 645 +
 .../v1/zz_generated.swagger_doc_generated.go | 280 +
 .../api/pkg/serialization/serialization.go | 45 +
 .../github.com/openshift/api/project/OWNERS | 2 +
 .../openshift/api/project/install.go | 26 +
 .../openshift/api/project/v1/doc.go | 8 +
 .../openshift/api/project/v1/generated.pb.go | 1305 ++
 .../openshift/api/project/v1/generated.proto | 90 +
 .../openshift/api/project/v1/legacy.go | 23 +
 .../openshift/api/project/v1/register.go | 40 +
 .../openshift/api/project/v1/types.go | 111 +
 .../api/project/v1/zz_generated.deepcopy.go | 142 +
 .../v1/zz_generated.swagger_doc_generated.go | 65 +
 vendor/github.com/openshift/api/quota/OWNERS | 3 +
 .../github.com/openshift/api/quota/install.go | 26 +
 ...openshift_01_clusterresourcequota.crd.yaml | 197 +
 .../openshift/api/quota/v1/Makefile | 3 +
 .../github.com/openshift/api/quota/v1/doc.go | 8 +
 .../openshift/api/quota/v1/generated.pb.go | 2152 ++
 .../openshift/api/quota/v1/generated.proto | 124 +
 .../openshift/api/quota/v1/legacy.go | 24 +
 .../openshift/api/quota/v1/register.go | 41 +
 ...stable.clusterresourcequota.testsuite.yaml | 18 +
 .../openshift/api/quota/v1/types.go | 139 +
 .../api/quota/v1/zz_generated.deepcopy.go | 242 +
 .../v1/zz_generated.swagger_doc_generated.go | 96 +
 .../openshift/api/route/.codegen.yaml | 8 +
 vendor/github.com/openshift/api/route/OWNERS | 5 +
 .../github.com/openshift/api/route/install.go | 26 +
 .../openshift/api/route/v1/Makefile | 3 +
 .../api/route/v1/custom.route.testsuite.yaml | 103 +
 .../github.com/openshift/api/route/v1/doc.go | 8 +
 .../openshift/api/route/v1/generated.pb.go | 4276 ++++
 .../openshift/api/route/v1/generated.proto | 456 +
 .../openshift/api/route/v1/legacy.go | 22 +
 .../openshift/api/route/v1/register.go | 39 +
 .../route/v1/route-CustomNoUpgrade.crd.yaml | 364 +
 .../v1/route-TechPreviewNoUpgrade.crd.yaml | 364 +
 .../openshift/api/route/v1/route.crd.yaml | 407 +
 .../api/route/v1/route.crd.yaml-patch | 67 +
 .../api/route/v1/stable.route.testsuite.yaml | 675 +
 .../route/v1/techpreview.route.testsuite.yaml | 103 +
 .../api/route/v1/test-route-validation.sh | 476 +
 .../openshift/api/route/v1/types.go | 537 +
 .../api/route/v1/zz_generated.deepcopy.go | 368 +
 .../v1/zz_generated.swagger_doc_generated.go | 189 +
 .../openshift/api/samples/.codegen.yaml | 2 +
 .../openshift/api/samples/install.go | 26 +
 .../api/samples/v1/00_samplesconfig.crd.yaml | 127 +
 .../openshift/api/samples/v1/Makefile | 3 +
 .../openshift/api/samples/v1/doc.go | 7 +
 .../openshift/api/samples/v1/generated.pb.go | 1847 ++
 .../openshift/api/samples/v1/generated.proto | 156 +
 .../openshift/api/samples/v1/register.go | 51 +
 .../samples/v1/stable.config.testsuite.yaml | 14 +
 .../openshift/api/samples/v1/types_config.go | 240 +
 .../api/samples/v1/zz_generated.deepcopy.go | 158 +
 .../v1/zz_generated.swagger_doc_generated.go | 74 +
 .../openshift/api/security/install.go | 26 +
 ...0000_03_security-openshift_01_scc.crd.yaml | 279 +
 .../openshift/api/security/v1/Makefile | 3 +
 .../openshift/api/security/v1/consts.go | 13 +
 .../openshift/api/security/v1/doc.go | 8 +
 .../openshift/api/security/v1/generated.pb.go | 5283 +++++
 .../openshift/api/security/v1/generated.proto | 380 +
 .../openshift/api/security/v1/legacy.go | 25 +
 .../openshift/api/security/v1/register.go | 44 +
 ....securitycontextconstraints.testsuite.yaml | 36 +
 .../openshift/api/security/v1/types.go | 468 +
 .../api/security/v1/zz_generated.deepcopy.go | 533 +
 .../v1/zz_generated.swagger_doc_generated.go | 228 +
 .../api/servicecertsigner/.codegen.yaml | 2 +
 .../api/servicecertsigner/install.go | 26 +
 .../api/servicecertsigner/v1alpha1/doc.go | 6 +
 .../servicecertsigner/v1alpha1/register.go | 40 +
 .../api/servicecertsigner/v1alpha1/types.go | 53 +
 .../v1alpha1/zz_generated.deepcopy.go | 105 +
 .../zz_generated.swagger_doc_generated.go | 33 +
 .../api/sharedresource/.codegen.yaml | 2 +
 .../openshift/api/sharedresource/OWNERS | 5 +
 .../openshift/api/sharedresource/install.go | 26 +
 .../v1alpha1/0000_10_sharedconfigmap.crd.yaml | 105 +
 .../v1alpha1/0000_10_sharedsecret.crd.yaml | 105 +
 .../api/sharedresource/v1alpha1/Makefile | 3 +
 .../api/sharedresource/v1alpha1/doc.go | 7 +
 .../api/sharedresource/v1alpha1/register.go | 53 +
 .../stable.sharedconfigmap.testsuite.yaml | 20 +
 .../stable.sharedsecret.testsuite.yaml | 20 +
 .../v1alpha1/types_shared_configmap.go | 93 +
 .../v1alpha1/types_shared_secret.go | 93 +
 .../v1alpha1/zz_generated.deepcopy.go | 245 +
 .../zz_generated.swagger_doc_generated.go | 112 +
 .../github.com/openshift/api/template/OWNERS | 4 +
 .../openshift/api/template/install.go | 26 +
 .../openshift/api/template/v1/codec.go | 33 +
 .../openshift/api/template/v1/consts.go | 16 +
 .../openshift/api/template/v1/doc.go | 8 +
 .../openshift/api/template/v1/generated.pb.go | 4115 ++++
 .../openshift/api/template/v1/generated.proto | 262 +
 .../openshift/api/template/v1/legacy.go | 24 +
 .../openshift/api/template/v1/register.go | 43 +
 .../openshift/api/template/v1/types.go | 294 +
 .../api/template/v1/zz_generated.deepcopy.go | 394 +
 .../v1/zz_generated.swagger_doc_generated.go | 159 +
 .../github.com/openshift/api/user/install.go | 26 +
 .../github.com/openshift/api/user/v1/doc.go | 8 +
 .../openshift/api/user/v1/generated.pb.go | 2274 ++
 .../openshift/api/user/v1/generated.proto | 144 +
 .../openshift/api/user/v1/legacy.go | 27 +
 .../openshift/api/user/v1/register.go | 44 +
 .../github.com/openshift/api/user/v1/types.go | 174 +
 .../api/user/v1/zz_generated.deepcopy.go | 258 +
 .../v1/zz_generated.swagger_doc_generated.go | 90 +
 .../build/v1/binarybuildsource.go | 23 +
 .../build/v1/bitbucketwebhookcause.go | 31 +
 .../applyconfigurations/build/v1/build.go | 242 +
 .../build/v1/buildcondition.go | 74 +
 .../build/v1/buildconfig.go | 242 +
 .../build/v1/buildconfigspec.go | 141 +
 .../build/v1/buildconfigstatus.go | 37 +
 .../build/v1/buildoutput.go | 50 +
 .../build/v1/buildpostcommitspec.go | 45 +
 .../build/v1/buildsource.go | 115 +
 .../applyconfigurations/build/v1/buildspec.go | 114 +
 .../build/v1/buildstatus.go | 149 +
 .../build/v1/buildstatusoutput.go | 23 +
 .../build/v1/buildstatusoutputto.go | 23 +
 .../build/v1/buildstrategy.go | 63 +
 .../build/v1/buildtriggercause.go | 68 +
 .../build/v1/buildtriggerpolicy.go | 72 +
 .../build/v1/buildvolume.go | 46 +
 .../build/v1/buildvolumemount.go | 23 +
 .../build/v1/buildvolumesource.go | 55 +
 .../build/v1/commonspec.go | 109 +
 .../build/v1/commonwebhookcause.go | 32 +
 .../build/v1/configmapbuildsource.go | 36 +
 .../build/v1/custombuildstrategy.go | 88 +
 .../build/v1/dockerbuildstrategy.go | 109 +
 .../build/v1/genericwebhookcause.go | 32 +
 .../build/v1/gitbuildsource.go | 57 +
 .../build/v1/githubwebhookcause.go | 32 +
 .../build/v1/gitlabwebhookcause.go | 31 +
 .../build/v1/gitsourcerevision.go | 50 +
 .../build/v1/imagechangecause.go | 36 +
 .../build/v1/imagechangetrigger.go | 45 +
 .../build/v1/imagechangetriggerstatus.go | 45 +
 .../build/v1/imagelabel.go | 32 +
 .../build/v1/imagesource.go | 61 +
 .../build/v1/imagesourcepath.go | 32 +
 .../build/v1/imagestreamtagreference.go | 32 +
 .../build/v1/jenkinspipelinebuildstrategy.go | 47 +
 .../build/v1/proxyconfig.go | 41 +
 .../build/v1/secretbuildsource.go | 36 +
 .../build/v1/secretlocalreference.go | 23 +
 .../build/v1/secretspec.go | 36 +
 .../build/v1/sourcebuildstrategy.go | 88 +
 .../build/v1/sourcecontroluser.go | 32 +
 .../build/v1/sourcerevision.go | 36 +
 .../applyconfigurations/build/v1/stageinfo.go | 60 +
 .../applyconfigurations/build/v1/stepinfo.go | 46 +
 .../build/v1/webhooktrigger.go | 41 +
 .../applyconfigurations/internal/internal.go | 1195 ++
 .../build/clientset/versioned/clientset.go | 105 +
 .../build/clientset/versioned/doc.go | 4 +
 .../build/clientset/versioned/scheme/doc.go | 4 +
 .../clientset/versioned/scheme/register.go | 40 +
 .../versioned/typed/build/v1/build.go | 273 +
 .../versioned/typed/build/v1/build_client.go | 96 +
 .../versioned/typed/build/v1/buildconfig.go | 257 +
 .../clientset/versioned/typed/build/v1/doc.go | 4 +
 .../typed/build/v1/generated_expansion.go | 7 +
 .../config/v1/awsdnsspec.go | 23 +
 .../config/v1/azureplatformstatus.go | 22 +-
 .../config/v1/azureresourcetag.go | 32 +
 .../v1/baremetalplatformloadbalancer.go | 27 +
 .../config/v1/baremetalplatformstatus.go | 19 +-
 .../config/v1/customfeaturegates.go | 12 +-
 .../config/v1/dnsplatformspec.go | 36 +
 .../applyconfigurations/config/v1/dnsspec.go | 15 +-
 .../config/v1/externalplatformspec.go | 23 +
 .../config/v1/featuregate.go | 8 +-
 .../config/v1/featuregateattributes.go | 27 +
 .../config/v1/featuregatedetails.go | 51 +
 .../config/v1/featuregatestatus.go | 43 +
 .../config/v1/infrastructurestatus.go | 9 +
 .../config/v1/ingressspec.go | 2 +-
 .../config/v1/nutanixplatformloadbalancer.go | 27 +
 .../config/v1/nutanixplatformstatus.go | 17 +-
 .../v1/openstackplatformloadbalancer.go | 27 +
 .../config/v1/openstackplatformstatus.go | 21 +-
 .../config/v1/ovirtplatformloadbalancer.go | 27 +
 .../config/v1/ovirtplatformstatus.go | 19 +-
 .../config/v1/platformspec.go | 41 +-
 .../config/v1/platformstatus.go | 9 +
 .../config/v1/powervsplatformstatus.go | 9 +
 .../applyconfigurations/config/v1/update.go | 19 +-
 .../v1/vsphereplatformfailuredomainspec.go | 59 +
 .../config/v1/vsphereplatformloadbalancer.go | 27 +
 .../v1/vsphereplatformnodenetworking.go | 32 +
 .../v1/vsphereplatformnodenetworkingspec.go | 45 +
 .../config/v1/vsphereplatformspec.go | 51 +
 .../config/v1/vsphereplatformstatus.go | 19 +-
 .../config/v1/vsphereplatformtopology.go | 70 +
 .../config/v1/vsphereplatformvcenterspec.go | 43 +
 .../config/v1alpha1/gatherconfig.go | 38 +
 .../config/v1alpha1/insightsdatagather.go | 240 +
 .../config/v1alpha1/insightsdatagatherspec.go | 23 +
 .../applyconfigurations/internal/internal.go | 394 +-
 .../config/clientset/versioned/clientset.go | 15 +-
 .../clientset/versioned/scheme/register.go | 16 +-
 .../typed/config/v1alpha1/config_client.go | 91 +
 .../versioned/typed/config/v1alpha1/doc.go | 4 +
 .../config/v1alpha1/generated_expansion.go | 5 +
 .../config/v1alpha1/insightsdatagather.go | 227 +
 .../externalversions/config/interface.go | 8 +
 .../config/v1alpha1/insightsdatagather.go | 73 +
 .../config/v1alpha1/interface.go | 29 +
 .../informers/externalversions/generic.go | 5 +
 .../config/v1alpha1/expansion_generated.go | 7 +
 .../config/v1alpha1/insightsdatagather.go | 52 +
 .../applyconfigurations/image/v1/image.go | 330 +
 .../image/v1/imagelayer.go | 41 +
 .../image/v1/imagelookuppolicy.go | 23 +
 .../image/v1/imagemanifest.go | 68 +
 .../image/v1/imagesignature.go | 269 +
 .../image/v1/imagestream.go | 242 +
 .../image/v1/imagestreammapping.go | 242 +
 .../image/v1/imagestreamspec.go | 46 +
 .../image/v1/imagestreamstatus.go | 46 +
 .../image/v1/namedtageventlist.go | 51 +
 .../image/v1/signaturecondition.go | 74 +
 .../image/v1/signaturegenericentity.go | 32 +
 .../image/v1/signatureissuer.go | 31 +
 .../image/v1/signaturesubject.go | 40 +
 .../applyconfigurations/image/v1/tagevent.go | 54 +
 .../image/v1/tageventcondition.go | 74 +
 .../image/v1/tagimportpolicy.go | 45 +
 .../image/v1/tagreference.go | 87 +
 .../image/v1/tagreferencepolicy.go | 27 +
 .../applyconfigurations/internal/internal.go | 598 +
 .../image/clientset/versioned/clientset.go | 105 +
 .../image/clientset/versioned/doc.go | 4 +
 .../image/clientset/versioned/scheme/doc.go | 4 +
 .../clientset/versioned/scheme/register.go | 40 +
 .../clientset/versioned/typed/image/v1/doc.go | 4 +
 .../typed/image/v1/generated_expansion.go | 19 +
 .../versioned/typed/image/v1/image.go | 181 +
 .../versioned/typed/image/v1/image_client.go | 126 +
 .../typed/image/v1/imagesignature.go | 59 +
 .../versioned/typed/image/v1/imagestream.go | 271 +
 .../typed/image/v1/imagestreamimage.go | 51 +
 .../typed/image/v1/imagestreamimport.go | 51 +
 .../typed/image/v1/imagestreammapping.go | 83 +
 .../typed/image/v1/imagestreamtag.go | 111 +
 .../versioned/typed/image/v1/imagetag.go | 111 +
 .../applyconfigurations/internal/internal.go | 275 +-
 .../operator/v1/awscsidriverconfigspec.go | 23 +
 .../operator/v1/azurecsidriverconfigspec.go | 23 +
 .../operator/v1/azurediskencryptionset.go | 41 +
 .../operator/v1/clustercsidriverspec.go | 11 +-
 .../operator/v1/consolecustomization.go | 14 +
 .../operator/v1/csidriverconfigspec.go | 63 +
 .../developerconsolecatalogcustomization.go | 9 +
 .../v1/developerconsolecatalogtypes.go | 45 +
 .../operator/v1/forwardplugin.go | 15 +-
 .../operator/v1/gatewayconfig.go | 15 +-
 .../operator/v1/gcpcsidriverconfigspec.go | 23 +
 .../operator/v1/gcpkmskeyreference.go | 50 +
 .../operator/v1/ibmloadbalancerparameters.go | 27 +
 .../operator/v1/insightsreport.go | 13 +
 .../operator/v1/kubeapiserverstatus.go | 14 +
 .../operator/v1/loadbalancerstrategy.go | 11 +
 .../operator/v1/perspective.go | 53 +
 .../operator/v1/perspectivevisibility.go | 36 +
 .../operator/v1/pinnedresourcereference.go | 41 +
 .../operator/v1/policyauditconfig.go | 9 +
 .../v1/providerloadbalancerparameters.go | 9 +
 .../v1/resourceattributesaccessreview.go | 40 +
 .../operator/v1/serviceaccountissuerstatus.go | 36 +
 .../operator/v1/storagespec.go | 9 +
 .../operator/v1/upstreamresolvers.go | 15 +-
 .../operator/v1/vspherecsidriverconfigspec.go | 25 +
 .../clientset/versioned/scheme/register.go | 14 +-
 .../github.com/openshift/library-go/LICENSE | 201 +
 .../pkg/controller/factory/base_controller.go | 276 +
 .../controller/factory/controller_context.go | 116 +
 .../pkg/controller/factory/eventfilters.go | 26 +
 .../pkg/controller/factory/factory.go | 309 +
 .../pkg/controller/factory/interfaces.go | 47 +
 .../openshift/library-go/pkg/crypto/OWNERS | 4 +
 .../openshift/library-go/pkg/crypto/crypto.go | 1252 ++
 .../library-go/pkg/crypto/rotation.go | 20 +
 .../pkg/operator/condition/condition.go | 72 +
 .../config_observer_controller.go | 284 +
 .../featuregates/featuregate.go | 47 +
 .../hardcoded_featuregate_reader.go | 78 +
 .../featuregates/observe_featuregates.go | 118 +
 .../featuregates/simple_featuregate_reader.go | 318 +
 .../operator/configobserver/unstructured.go | 45 +
 .../library-go/pkg/operator/events/OWNERS | 8 +
 .../pkg/operator/events/recorder.go | 238 +
 .../pkg/operator/events/recorder_in_memory.go | 86 +
 .../pkg/operator/events/recorder_logging.go | 58 +
 .../pkg/operator/events/recorder_upstream.go | 173 +
 .../operator/management/management_state.go | 77 +
 .../resourceapply/admissionregistration.go | 166 +
 .../resource/resourceapply/apiextensions.go | 56 +
 .../resource/resourceapply/apiregistration.go | 51 +
 .../operator/resource/resourceapply/apps.go | 246 +
 .../operator/resource/resourceapply/core.go | 657 +
 .../resourceapply/credentialsrequest.go | 106 +
 .../resource/resourceapply/event_helpers.go | 56 +
 .../resource/resourceapply/generic.go | 371 +
 .../resourceapply/json_patch_helpers.go | 70 +
 .../resource/resourceapply/migration.go | 59 +
 .../resource/resourceapply/monitoring.go | 168 +
 .../operator/resource/resourceapply/policy.go | 60 +
 .../operator/resource/resourceapply/rbac.go | 246 +
 .../resource/resourceapply/resource_cache.go | 168 +
 .../resource/resourceapply/storage.go | 259 +
 .../resource/resourceapply/unstructured.go | 42 +
 .../resourceapply/volumesnapshotclass.go | 129 +
 .../resourcehelper/resource_helpers.go | 76 +
 .../resourcemerge/admissionregistration.go | 51 +
 .../resource/resourcemerge/apiextensions.go | 68 +
 .../operator/resource/resourcemerge/apps.go | 80 +
 .../resourcemerge/generic_config_merger.go | 271 +
 .../resource/resourcemerge/object_merger.go | 277 +
 .../resource/resourceread/admission.go | 35 +
 .../resource/resourceread/apiextensions.go | 35 +
 .../operator/resource/resourceread/apps.go | 34 +
 .../operator/resource/resourceread/core.go | 78 +
 .../operator/resource/resourceread/generic.go | 57 +
 .../operator/resource/resourceread/images.go | 26 +
 .../resource/resourceread/migration.go | 26 +
 .../operator/resource/resourceread/policy.go | 25 +
 .../operator/resource/resourceread/rbac.go | 50 +
 .../operator/resource/resourceread/route.go | 26 +
 .../operator/resource/resourceread/storage.go | 43 +
 .../resource/resourceread/unstructured.go | 18 +
 .../operator/resourcesynccontroller/core.go | 67 +
 .../resourcesynccontroller/interfaces.go | 41 +
 .../resourcesync_controller.go | 340 +
 .../library-go/pkg/operator/v1helpers/args.go | 61 +
 .../pkg/operator/v1helpers/core_getters.go | 127 +
 .../pkg/operator/v1helpers/fake_informers.go | 7 +
 .../pkg/operator/v1helpers/helpers.go | 485 +
 .../pkg/operator/v1helpers/informers.go | 135 +
 .../pkg/operator/v1helpers/interfaces.go | 43 +
 .../pkg/operator/v1helpers/test_helpers.go | 302 +
 .../internal/clients/builder.go | 16 +-
 .../v1/helpers.go | 7 +-
 .../v1/register.go | 3 -
 .../v1/types.go | 57 +-
 .../v1/zz_generated.deepcopy.go | 63 +
 .../pkg/controller/common/constants.go | 17 +
 .../controller/common/controller_context.go | 33 +-
 .../pkg/controller/common/helpers.go | 467 +-
 .../controller/common/layered_node_state.go | 215 +
 .../controller/common/layered_pool_state.go | 69 +
 .../pkg/controller/common/metrics.go | 87 +-
 .../pkg/daemon/constants/constants.go | 19 +-
 .../clientset/versioned/clientset.go | 3 +-
 .../clientset/versioned/scheme/register.go | 14 +-
 .../informers/externalversions/factory.go | 79 +-
 .../pkg/version/version.go | 54 +
 .../pkg/version/version_fcos.go | 7 +
 .../pkg/version/version_scos.go | 7 +
 vendor/github.com/robfig/cron/.gitignore | 22 +
 vendor/github.com/robfig/cron/.travis.yml | 1 +
 vendor/github.com/robfig/cron/LICENSE | 21 +
 vendor/github.com/robfig/cron/README.md | 6 +
 .../github.com/robfig/cron/constantdelay.go | 27 +
 vendor/github.com/robfig/cron/cron.go | 259 +
 vendor/github.com/robfig/cron/doc.go | 129 +
 vendor/github.com/robfig/cron/parser.go | 380 +
 vendor/github.com/robfig/cron/spec.go | 158 +
 vendor/k8s.io/apiserver/LICENSE | 202 +
 .../apiserver/pkg/authentication/user/doc.go | 19 +
 .../apiserver/pkg/authentication/user/user.go | 84 +
 vendor/k8s.io/component-base/metrics/OWNERS | 11 +
 .../k8s.io/component-base/metrics/buckets.go | 43 +
 .../component-base/metrics/collector.go | 190 +
 .../k8s.io/component-base/metrics/counter.go | 242 +
 vendor/k8s.io/component-base/metrics/desc.go | 225 +
 vendor/k8s.io/component-base/metrics/gauge.go | 277 +
 .../component-base/metrics/histogram.go | 214 +
 vendor/k8s.io/component-base/metrics/http.go | 87 +
 .../k8s.io/component-base/metrics/labels.go | 22 +
 .../metrics/legacyregistry/registry.go | 92 +
 .../k8s.io/component-base/metrics/metric.go | 235 +
 .../k8s.io/component-base/metrics/options.go | 125 +
 vendor/k8s.io/component-base/metrics/opts.go | 356 +
 .../metrics/processstarttime.go | 51 +
 .../metrics/processstarttime_others.go | 39 +
 .../metrics/processstarttime_windows.go | 34 +
 .../prometheusextension/timing_histogram.go | 189 +
 .../timing_histogram_vec.go | 111 +
 .../prometheusextension/weighted_histogram.go | 203 +
 .../weighted_histogram_vec.go | 106 +
 .../k8s.io/component-base/metrics/registry.go | 385 +
 .../k8s.io/component-base/metrics/summary.go | 226 +
 .../metrics/timing_histogram.go | 270 +
 vendor/k8s.io/component-base/metrics/value.go | 70 +
 .../k8s.io/component-base/metrics/version.go | 37 +
 .../component-base/metrics/version_parser.go | 50 +
 .../k8s.io/component-base/metrics/wrappers.go | 167 +
 vendor/k8s.io/kube-aggregator/LICENSE | 202 +
 .../pkg/apis/apiregistration/doc.go | 21 +
 .../pkg/apis/apiregistration/helpers.go | 128 +
 .../pkg/apis/apiregistration/register.go | 54 +
 .../pkg/apis/apiregistration/types.go | 146 +
 .../pkg/apis/apiregistration/v1/defaults.go | 33 +
 .../pkg/apis/apiregistration/v1/doc.go | 37 +
 .../apis/apiregistration/v1/generated.pb.go | 1814 ++
 .../apis/apiregistration/v1/generated.proto | 151 +
 .../pkg/apis/apiregistration/v1/register.go | 61 +
 .../pkg/apis/apiregistration/v1/types.go | 162 +
 .../v1/zz_generated.conversion.go | 299 +
 .../v1/zz_generated.deepcopy.go | 174 +
 .../v1/zz_generated.defaults.go | 48 +
 .../apis/apiregistration/v1beta1/defaults.go | 33 +
 .../pkg/apis/apiregistration/v1beta1/doc.go | 38 +
 .../apiregistration/v1beta1/generated.pb.go | 1814 ++
 .../apiregistration/v1beta1/generated.proto | 151 +
 .../apis/apiregistration/v1beta1/register.go | 61 +
 .../pkg/apis/apiregistration/v1beta1/types.go | 168 +
 .../v1beta1/zz_generated.conversion.go | 299 +
 .../v1beta1/zz_generated.deepcopy.go | 174 +
 .../v1beta1/zz_generated.defaults.go | 48 +
 .../zz_generated.prerelease-lifecycle.go | 74 +
 .../apiregistration/zz_generated.deepcopy.go | 221 +
 .../clientset/scheme/doc.go | 20 +
 .../clientset/scheme/register.go | 58 +
 .../v1/apiregistration_client.go | 107 +
 .../typed/apiregistration/v1/apiservice.go | 184 +
 .../clientset/typed/apiregistration/v1/doc.go | 20 +
 .../apiregistration/v1/generated_expansion.go | 21 +
 .../pkg/apis/deviceplugin/v1beta1/api.pb.go | 132 +-
 .../pkg/apis/deviceplugin/v1beta1/api.proto | 3 +-
 vendor/modules.txt | 174 +-
 .../kube-storage-version-migrator/LICENSE | 202 +
 .../pkg/apis/migration/v1alpha1/doc.go | 20 +
 .../pkg/apis/migration/v1alpha1/register.go | 54 +
 .../pkg/apis/migration/v1alpha1/types.go | 186 +
 .../v1alpha1/zz_generated.deepcopy.go | 276 +
 .../pkg/clients/clientset/clientset.go | 120 +
 .../pkg/clients/clientset/doc.go | 20 +
 .../pkg/clients/clientset/scheme/doc.go | 20 +
 .../pkg/clients/clientset/scheme/register.go | 56 +
 .../clientset/typed/migration/v1alpha1/doc.go | 20 +
 .../migration/v1alpha1/generated_expansion.go | 23 +
 .../migration/v1alpha1/migration_client.go | 112 +
 .../typed/migration/v1alpha1/storagestate.go | 184 +
 .../v1alpha1/storageversionmigration.go | 184 +
 969 files changed, 194750 insertions(+), 1218 deletions(-)
 create mode 100644 vendor/github.com/ajeddeloh/go-json/OWNERS
 create mode 100644 vendor/github.com/aws/aws-sdk-go/LICENSE.txt
 create mode 100644 vendor/github.com/aws/aws-sdk-go/NOTICE.txt
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go
 create mode 100644 vendor/github.com/blang/semver/v4/LICENSE
 create mode 100644 vendor/github.com/blang/semver/v4/json.go
 create mode 100644 vendor/github.com/blang/semver/v4/range.go
 create mode 100644 vendor/github.com/blang/semver/v4/semver.go
 create mode 100644 vendor/github.com/blang/semver/v4/sort.go
 create mode 100644 vendor/github.com/blang/semver/v4/sql.go
 create mode 100644 vendor/github.com/coreos/ign-converter/translate/v33tov32/v33tov32.go
 create mode 100644 vendor/github.com/coreos/ign-converter/translate/v34tov33/v34tov33.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/shared/parse/unit.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_0/types/systemd.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_1/types/systemd.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_2/types/systemd.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/config.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/translate/translate.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/types/clevis.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/types/config.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/types/device.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/types/directory.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/types/disk.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/types/file.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/types/filesystem.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/types/headers.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/types/ignition.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/types/kargs.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/types/luks.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/types/mode.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/types/node.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/types/partition.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/types/passwd.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/types/path.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/types/proxy.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/types/raid.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/types/resource.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/types/schema.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/types/storage.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/types/systemd.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/types/tang.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/types/tls.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/types/unit.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/types/url.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_3/types/verification.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/config.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/translate/translate.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/types/clevis.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/types/config.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/types/device.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/types/directory.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/types/disk.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/types/file.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/types/filesystem.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/types/headers.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/types/ignition.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/types/kargs.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/types/luks.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/types/mode.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/types/node.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/types/partition.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/types/passwd.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/types/path.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/types/proxy.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/types/raid.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/types/resource.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/types/schema.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/types/storage.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/types/systemd.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/types/tang.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/types/tls.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/types/unit.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/types/url.go
 create mode 100644 vendor/github.com/coreos/ignition/v2/config/v3_4/types/verification.go
 create mode 100644 vendor/github.com/openshift/api/.ci-operator.yaml
 create mode 100644 vendor/github.com/openshift/api/.gitattributes
 create mode 100644 vendor/github.com/openshift/api/.gitignore
 create mode 100644 vendor/github.com/openshift/api/Makefile
 create mode 100644 vendor/github.com/openshift/api/OWNERS
 create mode 100644 vendor/github.com/openshift/api/README.md
 create mode 100644 vendor/github.com/openshift/api/apiserver/.codegen.yaml
 create mode 100644 vendor/github.com/openshift/api/apiserver/install.go
 create mode 100644 vendor/github.com/openshift/api/apiserver/v1/Makefile
 create mode 100644 vendor/github.com/openshift/api/apiserver/v1/apiserver.openshift.io_apirequestcount.yaml
 create mode 100644 vendor/github.com/openshift/api/apiserver/v1/doc.go
 create mode 100644 vendor/github.com/openshift/api/apiserver/v1/register.go
 create mode 100644 vendor/github.com/openshift/api/apiserver/v1/stable.apirequestcount.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/apiserver/v1/types_apirequestcount.go
 create mode 100644 vendor/github.com/openshift/api/apiserver/v1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/apiserver/v1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/apps/OWNERS
 create mode 100644 vendor/github.com/openshift/api/apps/install.go
 create mode 100644 vendor/github.com/openshift/api/apps/v1/consts.go
 create mode 100644 vendor/github.com/openshift/api/apps/v1/deprecated_consts.go
 create mode 100644 vendor/github.com/openshift/api/apps/v1/doc.go
 create mode 100644 vendor/github.com/openshift/api/apps/v1/generated.pb.go
 create mode 100644 vendor/github.com/openshift/api/apps/v1/generated.proto
 create mode 100644 vendor/github.com/openshift/api/apps/v1/legacy.go
 create mode 100644 vendor/github.com/openshift/api/apps/v1/register.go
 create mode 100644 vendor/github.com/openshift/api/apps/v1/types.go
 create mode 100644 vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/apps/v1/zz_prerelease_lifecycle_generated.go
 create mode 100644 vendor/github.com/openshift/api/authorization/install.go
 create mode 100644 vendor/github.com/openshift/api/authorization/v1/0000_03_authorization-openshift_01_rolebindingrestriction.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/authorization/v1/Makefile
 create mode 100644 vendor/github.com/openshift/api/authorization/v1/codec.go
 create mode 100644 vendor/github.com/openshift/api/authorization/v1/doc.go
 create mode 100644 vendor/github.com/openshift/api/authorization/v1/generated.pb.go
 create mode 100644 vendor/github.com/openshift/api/authorization/v1/generated.proto
 create mode 100644 vendor/github.com/openshift/api/authorization/v1/legacy.go
 create mode 100644 vendor/github.com/openshift/api/authorization/v1/register.go
 create mode 100644 vendor/github.com/openshift/api/authorization/v1/stable.rolebindingrestriction.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/authorization/v1/types.go
 create mode 100644 vendor/github.com/openshift/api/authorization/v1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/authorization/v1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/build/OWNERS
 create mode 100644 vendor/github.com/openshift/api/build/install.go
 create mode 100644 vendor/github.com/openshift/api/build/v1/consts.go
 create mode 100644 vendor/github.com/openshift/api/build/v1/doc.go
 create mode 100644 vendor/github.com/openshift/api/build/v1/generated.pb.go
 create mode 100644 vendor/github.com/openshift/api/build/v1/generated.proto
 create mode 100644 vendor/github.com/openshift/api/build/v1/legacy.go
 create mode 100644 vendor/github.com/openshift/api/build/v1/register.go
 create mode 100644 vendor/github.com/openshift/api/build/v1/types.go
 create mode 100644 vendor/github.com/openshift/api/build/v1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/cloudnetwork/OWNERS
 create mode 100644 vendor/github.com/openshift/api/cloudnetwork/install.go
 create mode 100644 vendor/github.com/openshift/api/cloudnetwork/v1/001-cloudprivateipconfig.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/cloudnetwork/v1/001-cloudprivateipconfig.crd.yaml-patch
 create mode 100644 vendor/github.com/openshift/api/cloudnetwork/v1/Makefile
 create mode 100644 vendor/github.com/openshift/api/cloudnetwork/v1/doc.go
 create mode 100644 vendor/github.com/openshift/api/cloudnetwork/v1/generated.pb.go
 create mode 100644 vendor/github.com/openshift/api/cloudnetwork/v1/generated.proto
 create mode 100644 vendor/github.com/openshift/api/cloudnetwork/v1/register.go
 create mode 100644 vendor/github.com/openshift/api/cloudnetwork/v1/stable.cloudprivateipconfig.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/cloudnetwork/v1/types.go
 create mode 100644 vendor/github.com/openshift/api/cloudnetwork/v1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/cloudnetwork/v1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/config/.codegen.yaml
 create mode 100644 vendor/github.com/openshift/api/config/install.go
 create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-CustomNoUpgrade.crd.yaml
 rename vendor/github.com/openshift/api/config/v1/{0000_10_config-operator_01_apiserver.crd.yaml => 0000_10_config-operator_01_apiserver-Default.crd.yaml} (99%)
 create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-TechPreviewNoUpgrade.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-CustomNoUpgrade.crd.yaml
 rename vendor/github.com/openshift/api/config/v1/{0000_10_config-operator_01_dns.crd.yaml => 0000_10_config-operator_01_dns-Default.crd.yaml} (68%)
 create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-TechPreviewNoUpgrade.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml-patch
 create mode 100644 vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-Default.crd.yaml-patch
 create mode 100644 vendor/github.com/openshift/api/config/v1/custom.apiserver.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/config/v1/custom.dns.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/config/v1/custom.infrastructure.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/config/v1/feature_gates.go
 create mode 100644 vendor/github.com/openshift/api/config/v1/techpreview.apiserver.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/config/v1/techpreview.dns.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_backup-TechPreviewNoUpgrade.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_insightsdatagather.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/config/v1alpha1/Makefile
 create mode 100644 vendor/github.com/openshift/api/config/v1alpha1/doc.go
 create mode 100644 vendor/github.com/openshift/api/config/v1alpha1/register.go
 create mode 100644 vendor/github.com/openshift/api/config/v1alpha1/techpreview.backup.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/config/v1alpha1/techpreview.insightsdatagather.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/config/v1alpha1/types_backup.go
 create mode 100644 vendor/github.com/openshift/api/config/v1alpha1/types_insights.go
 create mode 100644 vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/console/.codegen.yaml
 create mode 100644 vendor/github.com/openshift/api/console/OWNERS
 create mode 100644 vendor/github.com/openshift/api/console/install.go
 create mode 100644 vendor/github.com/openshift/api/console/v1/00_consoleclidownload.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/console/v1/00_consoleexternalloglink.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/console/v1/00_consolelink.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/console/v1/00_consolenotification.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/console/v1/00_consolequickstart.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/console/v1/00_consolesample.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/console/v1/00_consoleyamlsample.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/console/v1/90_consoleplugin.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/console/v1/Makefile
 create mode 100644 vendor/github.com/openshift/api/console/v1/doc.go
 create mode 100644 vendor/github.com/openshift/api/console/v1/register.go
 create mode 100644 vendor/github.com/openshift/api/console/v1/stable.consoleclidownload.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/console/v1/stable.consoleexternalloglink.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/console/v1/stable.consolelink.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/console/v1/stable.consolenotification.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/console/v1/stable.consoleplugin.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/console/v1/stable.consolequickstart.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/console/v1/stable.consolesample.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/console/v1/stable.consoleyamlsample.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/console/v1/types.go
 create mode 100644 vendor/github.com/openshift/api/console/v1/types_console_cli_download.go
 create mode 100644 vendor/github.com/openshift/api/console/v1/types_console_external_log_links.go
 create mode 100644 vendor/github.com/openshift/api/console/v1/types_console_link.go
 create mode 100644 vendor/github.com/openshift/api/console/v1/types_console_notification.go
 create mode 100644 vendor/github.com/openshift/api/console/v1/types_console_plugin.go
 create mode 100644 vendor/github.com/openshift/api/console/v1/types_console_quick_start.go
 create mode 100644 vendor/github.com/openshift/api/console/v1/types_console_sample.go
 create mode 100644 vendor/github.com/openshift/api/console/v1/types_console_yaml_sample.go
 create mode 100644 vendor/github.com/openshift/api/console/v1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/console/v1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/console/v1alpha1/90_consoleplugin.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/console/v1alpha1/Makefile
 create mode 100644 vendor/github.com/openshift/api/console/v1alpha1/doc.go
 create mode 100644 vendor/github.com/openshift/api/console/v1alpha1/register.go
 create mode 100644 vendor/github.com/openshift/api/console/v1alpha1/stable.consoleplugin.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/console/v1alpha1/types.go
 create mode 100644 vendor/github.com/openshift/api/console/v1alpha1/types_console_plugin.go
 create mode 100644 vendor/github.com/openshift/api/console/v1alpha1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/console/v1alpha1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/helm/.codegen.yaml
 create mode 100644 vendor/github.com/openshift/api/helm/install.go
 create mode 100644 vendor/github.com/openshift/api/helm/v1beta1/0000_10-helm-chart-repository.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/helm/v1beta1/0000_10-project-helm-chart-repository.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/helm/v1beta1/Makefile
 create mode 100644 vendor/github.com/openshift/api/helm/v1beta1/doc.go
 create mode 100644 vendor/github.com/openshift/api/helm/v1beta1/register.go
 create mode 100644 vendor/github.com/openshift/api/helm/v1beta1/stable.helmchartrepository.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/helm/v1beta1/stable.projecthelmchartrepository.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/helm/v1beta1/types_helm_chart_repository.go
 create mode 100644 vendor/github.com/openshift/api/helm/v1beta1/types_project_helm_chart_repository.go
 create mode 100644 vendor/github.com/openshift/api/helm/v1beta1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/helm/v1beta1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/image/.codegen.yaml
 create mode 100644 vendor/github.com/openshift/api/image/OWNERS
 create mode 100644 vendor/github.com/openshift/api/image/docker10/doc.go
 create mode 100644 vendor/github.com/openshift/api/image/docker10/register.go
 create mode 100644 vendor/github.com/openshift/api/image/docker10/types_docker.go
 create mode 100644 vendor/github.com/openshift/api/image/docker10/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/image/docker10/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/image/dockerpre012/deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/image/dockerpre012/doc.go
 create mode 100644 vendor/github.com/openshift/api/image/dockerpre012/register.go
 create mode 100644 vendor/github.com/openshift/api/image/dockerpre012/types_docker.go
 create mode 100644 vendor/github.com/openshift/api/image/dockerpre012/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/image/dockerpre012/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/image/install.go
 create mode 100644 vendor/github.com/openshift/api/image/v1/consts.go
 create mode 100644 vendor/github.com/openshift/api/image/v1/doc.go
 create mode 100644 vendor/github.com/openshift/api/image/v1/generated.pb.go
 create mode 100644 vendor/github.com/openshift/api/image/v1/generated.proto
 create mode 100644 vendor/github.com/openshift/api/image/v1/legacy.go
 create mode 100644 vendor/github.com/openshift/api/image/v1/register.go
 create mode 100644 vendor/github.com/openshift/api/image/v1/types.go
 create mode 100644 vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/imageregistry/.codegen.yaml
 create mode 100644 vendor/github.com/openshift/api/imageregistry/install.go
 create mode 100644 vendor/github.com/openshift/api/imageregistry/v1/00_imageregistry.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/imageregistry/v1/00_imageregistry.crd.yaml-patch
 create mode 100644 vendor/github.com/openshift/api/imageregistry/v1/01_imagepruner.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/imageregistry/v1/Makefile
 create mode 100644 vendor/github.com/openshift/api/imageregistry/v1/doc.go
 create mode 100644 vendor/github.com/openshift/api/imageregistry/v1/register.go
 create mode 100644 vendor/github.com/openshift/api/imageregistry/v1/stable.config.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/imageregistry/v1/stable.imagepruner.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/imageregistry/v1/types.go
 create mode 100644 vendor/github.com/openshift/api/imageregistry/v1/types_imagepruner.go
 create mode 100644 vendor/github.com/openshift/api/imageregistry/v1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/imageregistry/v1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/install.go
 create mode 100644 vendor/github.com/openshift/api/kubecontrolplane/.codegen.yaml
 create mode 100644 vendor/github.com/openshift/api/kubecontrolplane/install.go
 create mode 100644 vendor/github.com/openshift/api/kubecontrolplane/v1/doc.go
 create mode 100644 vendor/github.com/openshift/api/kubecontrolplane/v1/register.go
 create mode 100644 vendor/github.com/openshift/api/kubecontrolplane/v1/types.go
 create mode 100644 vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/legacyconfig/v1/doc.go
 create mode 100644 vendor/github.com/openshift/api/legacyconfig/v1/register.go
 create mode 100644 vendor/github.com/openshift/api/legacyconfig/v1/serialization.go
 create mode 100644 vendor/github.com/openshift/api/legacyconfig/v1/stringsource.go
 create mode 100644 vendor/github.com/openshift/api/legacyconfig/v1/types.go
 create mode 100644 vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/machine/.codegen.yaml
 create mode 100644 vendor/github.com/openshift/api/machine/OWNERS
 create mode 100644 vendor/github.com/openshift/api/machine/install.go
 create mode 100644 vendor/github.com/openshift/api/machine/v1/0000_10_controlplanemachineset.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/machine/v1/Makefile
 create mode 100644 vendor/github.com/openshift/api/machine/v1/common.go
 create mode 100644 vendor/github.com/openshift/api/machine/v1/doc.go
 create mode 100644 vendor/github.com/openshift/api/machine/v1/register.go
 create mode 100644 vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.aws.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.azure.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.gcp.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.openstack.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/machine/v1/types_alibabaprovider.go
 create mode 100644 vendor/github.com/openshift/api/machine/v1/types_aws.go
 create mode 100644 vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go
 create mode 100644 vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go
 create mode 100644 vendor/github.com/openshift/api/machine/v1/types_powervsprovider.go
 create mode 100644 vendor/github.com/openshift/api/machine/v1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/machine/v1alpha1/doc.go
 create mode 100644 vendor/github.com/openshift/api/machine/v1alpha1/register.go
 create mode 100644 vendor/github.com/openshift/api/machine/v1alpha1/types_openstack.go
 create mode 100644 vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/machine/v1beta1/0000_10_machine.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/machine/v1beta1/0000_10_machinehealthcheck.yaml
 create mode 100644 vendor/github.com/openshift/api/machine/v1beta1/0000_10_machineset.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/machine/v1beta1/Makefile
 create mode 100644 vendor/github.com/openshift/api/machine/v1beta1/doc.go
 create mode 100644 vendor/github.com/openshift/api/machine/v1beta1/register.go
 create mode 100644 vendor/github.com/openshift/api/machine/v1beta1/stable.machine.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/machine/v1beta1/stable.machinehealthcheck.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/machine/v1beta1/stable.machineset.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go
 create mode 100644 vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go
 create mode 100644 vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go
 create mode 100644 vendor/github.com/openshift/api/machine/v1beta1/types_machine.go
 create mode 100644 vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go
 create mode 100644 vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go
 create mode 100644 vendor/github.com/openshift/api/machine/v1beta1/types_provider.go
 create mode 100644 vendor/github.com/openshift/api/machine/v1beta1/types_vsphereprovider.go
 create mode 100644 vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/monitoring/.codegen.yaml
 create mode 100644 vendor/github.com/openshift/api/monitoring/install.go
 create mode 100644 vendor/github.com/openshift/api/monitoring/v1alpha1/0000_50_monitoring_01_alertingrules.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/monitoring/v1alpha1/0000_50_monitoring_02_alertrelabelconfigs.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/monitoring/v1alpha1/Makefile
 create mode 100644 vendor/github.com/openshift/api/monitoring/v1alpha1/doc.go
 create mode 100644 vendor/github.com/openshift/api/monitoring/v1alpha1/register.go
 create mode 100644 vendor/github.com/openshift/api/monitoring/v1alpha1/techpreview.alertingrule.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/monitoring/v1alpha1/techpreview.alertrelabelconfig.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/monitoring/v1alpha1/types.go
 create mode 100644 vendor/github.com/openshift/api/monitoring/v1alpha1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/monitoring/v1alpha1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/network/OWNERS
 create mode 100644 vendor/github.com/openshift/api/network/install.go
 create mode 100644 vendor/github.com/openshift/api/network/v1/001-clusternetwork-crd.yaml
 create mode 100644 vendor/github.com/openshift/api/network/v1/002-hostsubnet-crd.yaml
 create mode 100644 vendor/github.com/openshift/api/network/v1/003-netnamespace-crd.yaml
 create mode 100644 vendor/github.com/openshift/api/network/v1/004-egressnetworkpolicy-crd.yaml
 create mode 100644 vendor/github.com/openshift/api/network/v1/Makefile
 create mode 100644 vendor/github.com/openshift/api/network/v1/constants.go
 create mode 100644 vendor/github.com/openshift/api/network/v1/doc.go
 create mode 100644 vendor/github.com/openshift/api/network/v1/generated.pb.go
 create mode 100644 vendor/github.com/openshift/api/network/v1/generated.proto
 create mode 100644 vendor/github.com/openshift/api/network/v1/legacy.go
 create mode 100644 vendor/github.com/openshift/api/network/v1/register.go
 create mode 100644 vendor/github.com/openshift/api/network/v1/stable.clusternetwork.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/network/v1/stable.egressnetworkpolicy.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/network/v1/stable.hostsubnet.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/network/v1/stable.netnamespace.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/network/v1/types.go
 create mode 100644 vendor/github.com/openshift/api/network/v1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/network/v1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/networkoperator/.codegen.yaml
 create mode 100644 vendor/github.com/openshift/api/networkoperator/OWNERS
 create mode 100644 vendor/github.com/openshift/api/networkoperator/install.go
 create mode 100644 vendor/github.com/openshift/api/networkoperator/v1/001-egressrouter.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/networkoperator/v1/001-egressrouter.crd.yaml-patch
 create mode 100644 vendor/github.com/openshift/api/networkoperator/v1/Makefile
 create mode 100644 vendor/github.com/openshift/api/networkoperator/v1/doc.go
 create mode 100644 vendor/github.com/openshift/api/networkoperator/v1/generated.pb.go
 create mode 100644 vendor/github.com/openshift/api/networkoperator/v1/generated.proto
 create mode 100644 vendor/github.com/openshift/api/networkoperator/v1/register.go
 create mode 100644 vendor/github.com/openshift/api/networkoperator/v1/stable.egressrouter.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/networkoperator/v1/types_egressrouter.go
 create mode 100644 vendor/github.com/openshift/api/networkoperator/v1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/networkoperator/v1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/oauth/.codegen.yaml
 create mode 100644 vendor/github.com/openshift/api/oauth/install.go
 create mode 100644 vendor/github.com/openshift/api/oauth/v1/doc.go
 create mode 100644 vendor/github.com/openshift/api/oauth/v1/generated.pb.go
 create mode 100644 vendor/github.com/openshift/api/oauth/v1/generated.proto
 create mode 100644 vendor/github.com/openshift/api/oauth/v1/legacy.go
 create mode 100644 vendor/github.com/openshift/api/oauth/v1/register.go
 create mode 100644 vendor/github.com/openshift/api/oauth/v1/types.go
 create mode 100644 vendor/github.com/openshift/api/oauth/v1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/oauth/v1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/openshiftcontrolplane/.codegen.yaml
 create mode 100644 vendor/github.com/openshift/api/openshiftcontrolplane/install.go
 create mode 100644 vendor/github.com/openshift/api/openshiftcontrolplane/v1/doc.go
 create mode 100644 vendor/github.com/openshift/api/openshiftcontrolplane/v1/register.go
 create mode 100644 vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go
 create mode 100644 vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/operator/.codegen.yaml
 create mode 100644 vendor/github.com/openshift/api/operator/install.go
 rename vendor/github.com/openshift/api/operator/v1/{0000_70_console-operator.crd.yaml => 00_console-operator.crd.yaml} (97%)
 create mode 100644 vendor/github.com/openshift/api/operator/v1alpha1/0000_10_01_etcdbackup-TechPreviewNoUpgrade.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/operator/v1alpha1/0000_10_config-operator_01_olm-CustomNoUpgrade.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/operator/v1alpha1/0000_10_config-operator_01_olm-TechPreviewNoUpgrade.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/operator/v1alpha1/custom.olm.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/operator/v1alpha1/techpreview.etcdbackup.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/operator/v1alpha1/techpreview.olm.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/operator/v1alpha1/types_etcdbackup.go
 create mode 100644 vendor/github.com/openshift/api/operator/v1alpha1/types_olm.go
 create mode 100644 vendor/github.com/openshift/api/operatorcontrolplane/.codegen.yaml
 create mode 100644 vendor/github.com/openshift/api/operatorcontrolplane/install.go
 create mode 100644 vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/0000_10-pod-network-connectivity-check.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/Makefile
 create mode 100644 vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/doc.go
 create mode 100644 vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/register.go
 create mode 100644 vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/stable.podnetworkconnectivitycheck.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/types_conditioncheck.go
 create mode 100644 vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/osin/install.go
 create mode 100644 vendor/github.com/openshift/api/osin/v1/doc.go
 create mode 100644 vendor/github.com/openshift/api/osin/v1/register.go
 create mode 100644 vendor/github.com/openshift/api/osin/v1/types.go
 create mode 100644 vendor/github.com/openshift/api/osin/v1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/osin/v1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/pkg/serialization/serialization.go
 create mode 100644 vendor/github.com/openshift/api/project/OWNERS
 create mode 100644 vendor/github.com/openshift/api/project/install.go
 create mode 100644 vendor/github.com/openshift/api/project/v1/doc.go
 create mode 100644 vendor/github.com/openshift/api/project/v1/generated.pb.go
 create mode 100644 vendor/github.com/openshift/api/project/v1/generated.proto
 create mode 100644 vendor/github.com/openshift/api/project/v1/legacy.go
 create mode 100644 vendor/github.com/openshift/api/project/v1/register.go
 create mode 100644 vendor/github.com/openshift/api/project/v1/types.go
 create mode 100644 vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/quota/OWNERS
 create mode 100644 vendor/github.com/openshift/api/quota/install.go
 create mode 100644 vendor/github.com/openshift/api/quota/v1/0000_03_quota-openshift_01_clusterresourcequota.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/quota/v1/Makefile
 create mode 100644 vendor/github.com/openshift/api/quota/v1/doc.go
 create mode 100644 vendor/github.com/openshift/api/quota/v1/generated.pb.go
 create mode 100644 vendor/github.com/openshift/api/quota/v1/generated.proto
 create mode 100644 vendor/github.com/openshift/api/quota/v1/legacy.go
 create mode 100644 vendor/github.com/openshift/api/quota/v1/register.go
 create mode 100644 vendor/github.com/openshift/api/quota/v1/stable.clusterresourcequota.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/quota/v1/types.go
 create mode 100644 vendor/github.com/openshift/api/quota/v1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/quota/v1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/route/.codegen.yaml
 create mode 100644 vendor/github.com/openshift/api/route/OWNERS
 create mode 100644 vendor/github.com/openshift/api/route/install.go
 create mode 100644 vendor/github.com/openshift/api/route/v1/Makefile
 create mode 100644 vendor/github.com/openshift/api/route/v1/custom.route.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/route/v1/doc.go
 create mode 100644 vendor/github.com/openshift/api/route/v1/generated.pb.go
 create mode 100644 vendor/github.com/openshift/api/route/v1/generated.proto
 create mode 100644 vendor/github.com/openshift/api/route/v1/legacy.go
 create mode 100644 vendor/github.com/openshift/api/route/v1/register.go
 create mode 100644 vendor/github.com/openshift/api/route/v1/route-CustomNoUpgrade.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/route/v1/route-TechPreviewNoUpgrade.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/route/v1/route.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/route/v1/route.crd.yaml-patch
 create mode 100644 vendor/github.com/openshift/api/route/v1/stable.route.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/route/v1/techpreview.route.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/route/v1/test-route-validation.sh
 create mode 100644 vendor/github.com/openshift/api/route/v1/types.go
 create mode 100644 vendor/github.com/openshift/api/route/v1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/samples/.codegen.yaml
 create mode 100644 vendor/github.com/openshift/api/samples/install.go
 create mode 100644 vendor/github.com/openshift/api/samples/v1/00_samplesconfig.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/samples/v1/Makefile
 create mode 100644 vendor/github.com/openshift/api/samples/v1/doc.go
 create mode 100644 vendor/github.com/openshift/api/samples/v1/generated.pb.go
 create mode 100644 vendor/github.com/openshift/api/samples/v1/generated.proto
 create mode 100644 vendor/github.com/openshift/api/samples/v1/register.go
 create mode 100644 vendor/github.com/openshift/api/samples/v1/stable.config.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/samples/v1/types_config.go
 create mode 100644 vendor/github.com/openshift/api/samples/v1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/samples/v1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/security/install.go
 create mode 100644 vendor/github.com/openshift/api/security/v1/0000_03_security-openshift_01_scc.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/security/v1/Makefile
 create mode 100644 vendor/github.com/openshift/api/security/v1/consts.go
 create mode 100644 vendor/github.com/openshift/api/security/v1/doc.go
 create mode 100644 vendor/github.com/openshift/api/security/v1/generated.pb.go
 create mode 100644 vendor/github.com/openshift/api/security/v1/generated.proto
 create mode 100644 vendor/github.com/openshift/api/security/v1/legacy.go
 create mode 100644 vendor/github.com/openshift/api/security/v1/register.go
 create mode 100644 vendor/github.com/openshift/api/security/v1/stable.securitycontextconstraints.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/security/v1/types.go
 create mode 100644 vendor/github.com/openshift/api/security/v1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/servicecertsigner/.codegen.yaml
 create mode 100644 vendor/github.com/openshift/api/servicecertsigner/install.go
 create mode 100644 vendor/github.com/openshift/api/servicecertsigner/v1alpha1/doc.go
 create mode 100644 vendor/github.com/openshift/api/servicecertsigner/v1alpha1/register.go
 create mode 100644 vendor/github.com/openshift/api/servicecertsigner/v1alpha1/types.go
 create mode 100644 vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/sharedresource/.codegen.yaml
 create mode 100644 vendor/github.com/openshift/api/sharedresource/OWNERS
 create mode 100644 vendor/github.com/openshift/api/sharedresource/install.go
 create mode 100644 vendor/github.com/openshift/api/sharedresource/v1alpha1/0000_10_sharedconfigmap.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/sharedresource/v1alpha1/0000_10_sharedsecret.crd.yaml
 create mode 100644 vendor/github.com/openshift/api/sharedresource/v1alpha1/Makefile
 create mode 100644 vendor/github.com/openshift/api/sharedresource/v1alpha1/doc.go
 create mode 100644 vendor/github.com/openshift/api/sharedresource/v1alpha1/register.go
 create mode 100644 vendor/github.com/openshift/api/sharedresource/v1alpha1/stable.sharedconfigmap.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/sharedresource/v1alpha1/stable.sharedsecret.testsuite.yaml
 create mode 100644 vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_configmap.go
 create mode 100644 vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_secret.go
 create mode 100644 vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.deepcopy.go
 create mode 100644 vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.swagger_doc_generated.go
 create mode 100644 vendor/github.com/openshift/api/template/OWNERS
 create mode 100644 vendor/github.com/openshift/api/template/install.go
 create mode 100644 vendor/github.com/openshift/api/template/v1/codec.go
 create mode 100644 vendor/github.com/openshift/api/template/v1/consts.go
 create mode 100644 vendor/github.com/openshift/api/template/v1/doc.go
 create mode 100644 vendor/github.com/openshift/api/template/v1/generated.pb.go
 create mode 100644 vendor/github.com/openshift/api/template/v1/generated.proto
 create mode 
100644 vendor/github.com/openshift/api/template/v1/legacy.go create mode 100644 vendor/github.com/openshift/api/template/v1/register.go create mode 100644 vendor/github.com/openshift/api/template/v1/types.go create mode 100644 vendor/github.com/openshift/api/template/v1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/openshift/api/template/v1/zz_generated.swagger_doc_generated.go create mode 100644 vendor/github.com/openshift/api/user/install.go create mode 100644 vendor/github.com/openshift/api/user/v1/doc.go create mode 100644 vendor/github.com/openshift/api/user/v1/generated.pb.go create mode 100644 vendor/github.com/openshift/api/user/v1/generated.proto create mode 100644 vendor/github.com/openshift/api/user/v1/legacy.go create mode 100644 vendor/github.com/openshift/api/user/v1/register.go create mode 100644 vendor/github.com/openshift/api/user/v1/types.go create mode 100644 vendor/github.com/openshift/api/user/v1/zz_generated.deepcopy.go create mode 100644 vendor/github.com/openshift/api/user/v1/zz_generated.swagger_doc_generated.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/binarybuildsource.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/bitbucketwebhookcause.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/build.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildcondition.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildconfig.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildconfigspec.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildconfigstatus.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildoutput.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildpostcommitspec.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildsource.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildspec.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstatus.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstatusoutput.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstatusoutputto.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstrategy.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildtriggercause.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildtriggerpolicy.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildvolume.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildvolumemount.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildvolumesource.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/commonspec.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/commonwebhookcause.go create mode 100644 
vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/configmapbuildsource.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/custombuildstrategy.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/dockerbuildstrategy.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/genericwebhookcause.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/gitbuildsource.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/githubwebhookcause.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/gitlabwebhookcause.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/gitsourcerevision.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagechangecause.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagechangetrigger.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagechangetriggerstatus.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagelabel.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagesource.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagesourcepath.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagestreamtagreference.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/jenkinspipelinebuildstrategy.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/proxyconfig.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/secretbuildsource.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/secretlocalreference.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/secretspec.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/sourcebuildstrategy.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/sourcecontroluser.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/sourcerevision.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/stageinfo.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/stepinfo.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/webhooktrigger.go create mode 100644 vendor/github.com/openshift/client-go/build/applyconfigurations/internal/internal.go create mode 100644 vendor/github.com/openshift/client-go/build/clientset/versioned/clientset.go create mode 100644 vendor/github.com/openshift/client-go/build/clientset/versioned/doc.go create mode 100644 vendor/github.com/openshift/client-go/build/clientset/versioned/scheme/doc.go create mode 100644 vendor/github.com/openshift/client-go/build/clientset/versioned/scheme/register.go create mode 100644 vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/build.go create mode 100644 
vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/build_client.go create mode 100644 vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/buildconfig.go create mode 100644 vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/doc.go create mode 100644 vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/generated_expansion.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsdnsspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureresourcetag.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformloadbalancer.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsplatformspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalplatformspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateattributes.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatedetails.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatestatus.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformloadbalancer.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformloadbalancer.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformloadbalancer.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformfailuredomainspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformloadbalancer.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworking.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworkingspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformtopology.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformvcenterspec.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/gatherconfig.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/insightsdatagather.go create mode 100644 vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/insightsdatagatherspec.go create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/config_client.go create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/doc.go create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/generated_expansion.go create mode 100644 vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/insightsdatagather.go create mode 100644 
vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/insightsdatagather.go create mode 100644 vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/interface.go create mode 100644 vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/expansion_generated.go create mode 100644 vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/insightsdatagather.go create mode 100644 vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/image.go create mode 100644 vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagelayer.go create mode 100644 vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagelookuppolicy.go create mode 100644 vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagemanifest.go create mode 100644 vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagesignature.go create mode 100644 vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestream.go create mode 100644 vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreammapping.go create mode 100644 vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreamspec.go create mode 100644 vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreamstatus.go create mode 100644 vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/namedtageventlist.go create mode 100644 vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturecondition.go create mode 100644 vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturegenericentity.go create mode 100644 vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signatureissuer.go create mode 100644 vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturesubject.go create mode 100644 vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagevent.go create mode 100644 vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tageventcondition.go create mode 100644 vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagimportpolicy.go create mode 100644 vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagreference.go create mode 100644 vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagreferencepolicy.go create mode 100644 vendor/github.com/openshift/client-go/image/applyconfigurations/internal/internal.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/clientset.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/doc.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/scheme/doc.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/scheme/register.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/doc.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/generated_expansion.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/image.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/image_client.go create mode 100644 
vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagesignature.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestream.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamimage.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamimport.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreammapping.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamtag.go create mode 100644 vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagetag.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awscsidriverconfigspec.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/azurecsidriverconfigspec.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/azurediskencryptionset.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csidriverconfigspec.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogtypes.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gcpcsidriverconfigspec.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gcpkmskeyreference.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ibmloadbalancerparameters.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/perspective.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/perspectivevisibility.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/pinnedresourcereference.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/resourceattributesaccessreview.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/serviceaccountissuerstatus.go create mode 100644 vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/vspherecsidriverconfigspec.go create mode 100644 vendor/github.com/openshift/library-go/LICENSE create mode 100644 vendor/github.com/openshift/library-go/pkg/controller/factory/base_controller.go create mode 100644 vendor/github.com/openshift/library-go/pkg/controller/factory/controller_context.go create mode 100644 vendor/github.com/openshift/library-go/pkg/controller/factory/eventfilters.go create mode 100644 vendor/github.com/openshift/library-go/pkg/controller/factory/factory.go create mode 100644 vendor/github.com/openshift/library-go/pkg/controller/factory/interfaces.go create mode 100644 vendor/github.com/openshift/library-go/pkg/crypto/OWNERS create mode 100644 vendor/github.com/openshift/library-go/pkg/crypto/crypto.go create mode 100644 vendor/github.com/openshift/library-go/pkg/crypto/rotation.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/condition/condition.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go create mode 
100644 vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/featuregate.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/hardcoded_featuregate_reader.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/simple_featuregate_reader.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/configobserver/unstructured.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/events/OWNERS create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/management/management_state.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/admissionregistration.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiextensions.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiregistration.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apps.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/credentialsrequest.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/event_helpers.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/migration.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/monitoring.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/policy.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/rbac.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/resource_cache.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/unstructured.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/volumesnapshotclass.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourcehelper/resource_helpers.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/admissionregistration.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apiextensions.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apps.go create mode 100644 
vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/object_merger.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/admission.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apiextensions.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apps.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/core.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/generic.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/images.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/migration.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/policy.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/rbac.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/route.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/storage.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/unstructured.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/core.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/interfaces.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/v1helpers/args.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/v1helpers/core_getters.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/v1helpers/fake_informers.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go create mode 100644 vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go create mode 100644 vendor/github.com/openshift/machine-config-operator/pkg/controller/common/layered_node_state.go create mode 100644 vendor/github.com/openshift/machine-config-operator/pkg/controller/common/layered_pool_state.go create mode 100644 vendor/github.com/openshift/machine-config-operator/pkg/version/version.go create mode 100644 vendor/github.com/openshift/machine-config-operator/pkg/version/version_fcos.go create mode 100644 vendor/github.com/openshift/machine-config-operator/pkg/version/version_scos.go create mode 100644 vendor/github.com/robfig/cron/.gitignore create mode 100644 vendor/github.com/robfig/cron/.travis.yml create mode 100644 vendor/github.com/robfig/cron/LICENSE create mode 100644 vendor/github.com/robfig/cron/README.md create mode 100644 vendor/github.com/robfig/cron/constantdelay.go create mode 100644 vendor/github.com/robfig/cron/cron.go create mode 100644 vendor/github.com/robfig/cron/doc.go create mode 100644 vendor/github.com/robfig/cron/parser.go create mode 100644 vendor/github.com/robfig/cron/spec.go create mode 100644 
vendor/k8s.io/apiserver/LICENSE create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/user/doc.go create mode 100644 vendor/k8s.io/apiserver/pkg/authentication/user/user.go create mode 100644 vendor/k8s.io/component-base/metrics/OWNERS create mode 100644 vendor/k8s.io/component-base/metrics/buckets.go create mode 100644 vendor/k8s.io/component-base/metrics/collector.go create mode 100644 vendor/k8s.io/component-base/metrics/counter.go create mode 100644 vendor/k8s.io/component-base/metrics/desc.go create mode 100644 vendor/k8s.io/component-base/metrics/gauge.go create mode 100644 vendor/k8s.io/component-base/metrics/histogram.go create mode 100644 vendor/k8s.io/component-base/metrics/http.go create mode 100644 vendor/k8s.io/component-base/metrics/labels.go create mode 100644 vendor/k8s.io/component-base/metrics/legacyregistry/registry.go create mode 100644 vendor/k8s.io/component-base/metrics/metric.go create mode 100644 vendor/k8s.io/component-base/metrics/options.go create mode 100644 vendor/k8s.io/component-base/metrics/opts.go create mode 100644 vendor/k8s.io/component-base/metrics/processstarttime.go create mode 100644 vendor/k8s.io/component-base/metrics/processstarttime_others.go create mode 100644 vendor/k8s.io/component-base/metrics/processstarttime_windows.go create mode 100644 vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram.go create mode 100644 vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram_vec.go create mode 100644 vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram.go create mode 100644 vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram_vec.go create mode 100644 vendor/k8s.io/component-base/metrics/registry.go create mode 100644 vendor/k8s.io/component-base/metrics/summary.go create mode 100644 vendor/k8s.io/component-base/metrics/timing_histogram.go create mode 100644 vendor/k8s.io/component-base/metrics/value.go create mode 100644 vendor/k8s.io/component-base/metrics/version.go create mode 100644 vendor/k8s.io/component-base/metrics/version_parser.go create mode 100644 vendor/k8s.io/component-base/metrics/wrappers.go create mode 100644 vendor/k8s.io/kube-aggregator/LICENSE create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/doc.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/helpers.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/register.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/types.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/defaults.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/doc.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.pb.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/register.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/defaults.go create mode 100644 
vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/doc.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/register.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/types.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.prerelease-lifecycle.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/doc.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/register.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiregistration_client.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiservice.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/doc.go create mode 100644 vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/generated_expansion.go create mode 100644 vendor/sigs.k8s.io/kube-storage-version-migrator/LICENSE create mode 100644 vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/doc.go create mode 100644 vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/register.go create mode 100644 vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/types.go create mode 100644 vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/clientset.go create mode 100644 vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/doc.go create mode 100644 vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme/doc.go create mode 100644 vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme/register.go create mode 100644 vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/doc.go create mode 100644 vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/generated_expansion.go create mode 100644 vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/migration_client.go create mode 100644 vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/storagestate.go create mode 100644 vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/storageversionmigration.go diff --git a/vendor/github.com/ajeddeloh/go-json/OWNERS b/vendor/github.com/ajeddeloh/go-json/OWNERS new file mode 100644 index 000000000..748180204 --- /dev/null +++ b/vendor/github.com/ajeddeloh/go-json/OWNERS @@ -0,0 +1,19 
@@ +# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md + +# This matches the fedora-coreos-tools team members. +# XXX: figure out a way to just have it use GitHub team membership directly. + +approvers: + - arithx + - ashcrow + - bgilbert + - cgwalters + - darkmuggle + - dustymabe + - jlebon + - LorbusChris + - lucab + - miabbott + - mike-nguyen + - yuqi-zhang + - zonggen diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt new file mode 100644 index 000000000..899129ecc --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go b/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go new file mode 100644 index 000000000..1c4967429 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/arn/arn.go @@ -0,0 +1,93 @@ +// Package arn provides a parser for interacting with Amazon Resource Names. +package arn + +import ( + "errors" + "strings" +) + +const ( + arnDelimiter = ":" + arnSections = 6 + arnPrefix = "arn:" + + // zero-indexed + sectionPartition = 1 + sectionService = 2 + sectionRegion = 3 + sectionAccountID = 4 + sectionResource = 5 + + // errors + invalidPrefix = "arn: invalid prefix" + invalidSections = "arn: not enough sections" +) + +// ARN captures the individual fields of an Amazon Resource Name. +// See http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html for more information. +type ARN struct { + // The partition that the resource is in. For standard AWS regions, the partition is "aws". If you have resources in + // other partitions, the partition is "aws-partitionname". For example, the partition for resources in the China + // (Beijing) region is "aws-cn". + Partition string + + // The service namespace that identifies the AWS product (for example, Amazon S3, IAM, or Amazon RDS). For a list of + // namespaces, see + // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces. + Service string + + // The region the resource resides in. Note that the ARNs for some resources do not require a region, so this + // component might be omitted. + Region string + + // The ID of the AWS account that owns the resource, without the hyphens. For example, 123456789012. Note that the + // ARNs for some resources don't require an account number, so this component might be omitted. + AccountID string + + // The content of this part of the ARN varies by service. 
It often includes an indicator of the type of resource — + // for example, an IAM user or Amazon RDS database - followed by a slash (/) or a colon (:), followed by the + // resource name itself. Some services allow paths for resource names, as described in + // http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-paths. + Resource string +} + +// Parse parses an ARN into its constituent parts. +// +// Some example ARNs: +// arn:aws:elasticbeanstalk:us-east-1:123456789012:environment/My App/MyEnvironment +// arn:aws:iam::123456789012:user/David +// arn:aws:rds:eu-west-1:123456789012:db:mysql-db +// arn:aws:s3:::my_corporate_bucket/exampleobject.png +func Parse(arn string) (ARN, error) { + if !strings.HasPrefix(arn, arnPrefix) { + return ARN{}, errors.New(invalidPrefix) + } + sections := strings.SplitN(arn, arnDelimiter, arnSections) + if len(sections) != arnSections { + return ARN{}, errors.New(invalidSections) + } + return ARN{ + Partition: sections[sectionPartition], + Service: sections[sectionService], + Region: sections[sectionRegion], + AccountID: sections[sectionAccountID], + Resource: sections[sectionResource], + }, nil +} + +// IsARN returns whether the given string is an ARN by looking for +// whether the string starts with "arn:" and contains the correct number +// of sections delimited by colons (:). +func IsARN(arn string) bool { + return strings.HasPrefix(arn, arnPrefix) && strings.Count(arn, ":") >= arnSections-1 +} + +// String returns the canonical representation of the ARN +func (arn ARN) String() string { + return arnPrefix + + arn.Partition + arnDelimiter + + arn.Service + arnDelimiter + + arn.Region + arnDelimiter + + arn.AccountID + arnDelimiter + + arn.Resource +} diff --git a/vendor/github.com/blang/semver/v4/LICENSE b/vendor/github.com/blang/semver/v4/LICENSE new file mode 100644 index 000000000..5ba5c86fc --- /dev/null +++ b/vendor/github.com/blang/semver/v4/LICENSE @@ -0,0 +1,22 @@ +The MIT License + +Copyright (c) 2014 Benedikt Lang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + diff --git a/vendor/github.com/blang/semver/v4/json.go b/vendor/github.com/blang/semver/v4/json.go new file mode 100644 index 000000000..a74bf7c44 --- /dev/null +++ b/vendor/github.com/blang/semver/v4/json.go @@ -0,0 +1,23 @@ +package semver + +import ( + "encoding/json" +) + +// MarshalJSON implements the encoding/json.Marshaler interface.
+func (v Version) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements the encoding/json.Unmarshaler interface. +func (v *Version) UnmarshalJSON(data []byte) (err error) { + var versionString string + + if err = json.Unmarshal(data, &versionString); err != nil { + return + } + + *v, err = Parse(versionString) + + return +} diff --git a/vendor/github.com/blang/semver/v4/range.go b/vendor/github.com/blang/semver/v4/range.go new file mode 100644 index 000000000..95f7139b9 --- /dev/null +++ b/vendor/github.com/blang/semver/v4/range.go @@ -0,0 +1,416 @@ +package semver + +import ( + "fmt" + "strconv" + "strings" + "unicode" +) + +type wildcardType int + +const ( + noneWildcard wildcardType = iota + majorWildcard wildcardType = 1 + minorWildcard wildcardType = 2 + patchWildcard wildcardType = 3 +) + +func wildcardTypefromInt(i int) wildcardType { + switch i { + case 1: + return majorWildcard + case 2: + return minorWildcard + case 3: + return patchWildcard + default: + return noneWildcard + } +} + +type comparator func(Version, Version) bool + +var ( + compEQ comparator = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == 0 + } + compNE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) != 0 + } + compGT = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == 1 + } + compGE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) >= 0 + } + compLT = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) == -1 + } + compLE = func(v1 Version, v2 Version) bool { + return v1.Compare(v2) <= 0 + } +) + +type versionRange struct { + v Version + c comparator +} + +// rangeFunc creates a Range from the given versionRange. +func (vr *versionRange) rangeFunc() Range { + return Range(func(v Version) bool { + return vr.c(v, vr.v) + }) +} + +// Range represents a range of versions. +// A Range can be used to check if a Version satisfies it: +// +// range, err := semver.ParseRange(">1.0.0 <2.0.0") +// range(semver.MustParse("1.1.1")) // returns true +type Range func(Version) bool + +// OR combines the existing Range with another Range using logical OR. +func (rf Range) OR(f Range) Range { + return Range(func(v Version) bool { + return rf(v) || f(v) + }) +} + +// AND combines the existing Range with another Range using logical AND. +func (rf Range) AND(f Range) Range { + return Range(func(v Version) bool { + return rf(v) && f(v) + }) +} + +// ParseRange parses a range and returns a Range. +// If the range could not be parsed, an error is returned. +// +// Valid ranges are: +// - "<1.0.0" +// - "<=1.0.0" +// - ">1.0.0" +// - ">=1.0.0" +// - "1.0.0", "=1.0.0", "==1.0.0" +// - "!1.0.0", "!=1.0.0" +// +// A Range can consist of multiple ranges separated by space: +// Ranges can be linked by logical AND: +// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0" +// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2 +// +// Ranges can also be linked by logical OR: +// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x" +// +// AND has a higher precedence than OR. It's not possible to use brackets.
+// +// Ranges can be combined by both AND and OR +// +// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` +func ParseRange(s string) (Range, error) { + parts := splitAndTrim(s) + orParts, err := splitORParts(parts) + if err != nil { + return nil, err + } + expandedParts, err := expandWildcardVersion(orParts) + if err != nil { + return nil, err + } + var orFn Range + for _, p := range expandedParts { + var andFn Range + for _, ap := range p { + opStr, vStr, err := splitComparatorVersion(ap) + if err != nil { + return nil, err + } + vr, err := buildVersionRange(opStr, vStr) + if err != nil { + return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err) + } + rf := vr.rangeFunc() + + // Set function + if andFn == nil { + andFn = rf + } else { // Combine with existing function + andFn = andFn.AND(rf) + } + } + if orFn == nil { + orFn = andFn + } else { + orFn = orFn.OR(andFn) + } + + } + return orFn, nil +} + +// splitORParts splits the already cleaned parts by '||'. +// Checks for invalid positions of the operator and returns an +// error if found. +func splitORParts(parts []string) ([][]string, error) { + var ORparts [][]string + last := 0 + for i, p := range parts { + if p == "||" { + if i == 0 { + return nil, fmt.Errorf("First element in range is '||'") + } + ORparts = append(ORparts, parts[last:i]) + last = i + 1 + } + } + if last == len(parts) { + return nil, fmt.Errorf("Last element in range is '||'") + } + ORparts = append(ORparts, parts[last:]) + return ORparts, nil +} + +// buildVersionRange takes a slice of 2: operator and version +// and builds a versionRange, otherwise an error. +func buildVersionRange(opStr, vStr string) (*versionRange, error) { + c := parseComparator(opStr) + if c == nil { + return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, "")) + } + v, err := Parse(vStr) + if err != nil { + return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err) + } + + return &versionRange{ + v: v, + c: c, + }, nil + +} + +// inArray checks if a byte is contained in an array of bytes +func inArray(s byte, list []byte) bool { + for _, el := range list { + if el == s { + return true + } + } + return false +} + +// splitAndTrim splits a range string by spaces and cleans whitespaces +func splitAndTrim(s string) (result []string) { + last := 0 + var lastChar byte + excludeFromSplit := []byte{'>', '<', '='} + for i := 0; i < len(s); i++ { + if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) { + if last < i-1 { + result = append(result, s[last:i]) + } + last = i + 1 + } else if s[i] != ' ' { + lastChar = s[i] + } + } + if last < len(s)-1 { + result = append(result, s[last:]) + } + + for i, v := range result { + result[i] = strings.Replace(v, " ", "", -1) + } + + // parts := strings.Split(s, " ") + // for _, x := range parts { + // if s := strings.TrimSpace(x); len(s) != 0 { + // result = append(result, s) + // } + // } + return +} + +// splitComparatorVersion splits the comparator from the version. +// Input must be free of leading or trailing spaces. 
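The ParseRange grammar documented above is easiest to see in a small usage sketch; assuming the vendored blang/semver/v4 import path, space-separated comparators AND together within a group and "||" ORs the groups.

    package main

    import (
    	"fmt"

    	"github.com/blang/semver/v4"
    )

    func main() {
    	r, err := semver.ParseRange(">=1.0.0 <2.0.0 || >=3.0.0")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(r(semver.MustParse("1.4.3"))) // true  (inside the first group)
    	fmt.Println(r(semver.MustParse("2.1.0"))) // false (matches neither group)
    	fmt.Println(r(semver.MustParse("3.0.0"))) // true  (second group)
    }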
+func splitComparatorVersion(s string) (string, string, error) { + i := strings.IndexFunc(s, unicode.IsDigit) + if i == -1 { + return "", "", fmt.Errorf("Could not get version from string: %q", s) + } + return strings.TrimSpace(s[0:i]), s[i:], nil +} + +// getWildcardType will return the type of wildcard that the +// passed version contains +func getWildcardType(vStr string) wildcardType { + parts := strings.Split(vStr, ".") + nparts := len(parts) + wildcard := parts[nparts-1] + + possibleWildcardType := wildcardTypefromInt(nparts) + if wildcard == "x" { + return possibleWildcardType + } + + return noneWildcard +} + +// createVersionFromWildcard will convert a wildcard version +// into a regular version, replacing 'x's with '0's, handling +// special cases like '1.x.x' and '1.x' +func createVersionFromWildcard(vStr string) string { + // handle 1.x.x + vStr2 := strings.Replace(vStr, ".x.x", ".x", 1) + vStr2 = strings.Replace(vStr2, ".x", ".0", 1) + parts := strings.Split(vStr2, ".") + + // handle 1.x + if len(parts) == 2 { + return vStr2 + ".0" + } + + return vStr2 +} + +// incrementMajorVersion will increment the major version +// of the passed version +func incrementMajorVersion(vStr string) (string, error) { + parts := strings.Split(vStr, ".") + i, err := strconv.Atoi(parts[0]) + if err != nil { + return "", err + } + parts[0] = strconv.Itoa(i + 1) + + return strings.Join(parts, "."), nil +} + +// incrementMajorVersion will increment the minor version +// of the passed version +func incrementMinorVersion(vStr string) (string, error) { + parts := strings.Split(vStr, ".") + i, err := strconv.Atoi(parts[1]) + if err != nil { + return "", err + } + parts[1] = strconv.Itoa(i + 1) + + return strings.Join(parts, "."), nil +} + +// expandWildcardVersion will expand wildcards inside versions +// following these rules: +// +// * when dealing with patch wildcards: +// >= 1.2.x will become >= 1.2.0 +// <= 1.2.x will become < 1.3.0 +// > 1.2.x will become >= 1.3.0 +// < 1.2.x will become < 1.2.0 +// != 1.2.x will become < 1.2.0 >= 1.3.0 +// +// * when dealing with minor wildcards: +// >= 1.x will become >= 1.0.0 +// <= 1.x will become < 2.0.0 +// > 1.x will become >= 2.0.0 +// < 1.0 will become < 1.0.0 +// != 1.x will become < 1.0.0 >= 2.0.0 +// +// * when dealing with wildcards without +// version operator: +// 1.2.x will become >= 1.2.0 < 1.3.0 +// 1.x will become >= 1.0.0 < 2.0.0 +func expandWildcardVersion(parts [][]string) ([][]string, error) { + var expandedParts [][]string + for _, p := range parts { + var newParts []string + for _, ap := range p { + if strings.Contains(ap, "x") { + opStr, vStr, err := splitComparatorVersion(ap) + if err != nil { + return nil, err + } + + versionWildcardType := getWildcardType(vStr) + flatVersion := createVersionFromWildcard(vStr) + + var resultOperator string + var shouldIncrementVersion bool + switch opStr { + case ">": + resultOperator = ">=" + shouldIncrementVersion = true + case ">=": + resultOperator = ">=" + case "<": + resultOperator = "<" + case "<=": + resultOperator = "<" + shouldIncrementVersion = true + case "", "=", "==": + newParts = append(newParts, ">="+flatVersion) + resultOperator = "<" + shouldIncrementVersion = true + case "!=", "!": + newParts = append(newParts, "<"+flatVersion) + resultOperator = ">=" + shouldIncrementVersion = true + } + + var resultVersion string + if shouldIncrementVersion { + switch versionWildcardType { + case patchWildcard: + resultVersion, _ = incrementMinorVersion(flatVersion) + case minorWildcard: + 
resultVersion, _ = incrementMajorVersion(flatVersion) + } + } else { + resultVersion = flatVersion + } + + ap = resultOperator + resultVersion + } + newParts = append(newParts, ap) + } + expandedParts = append(expandedParts, newParts) + } + + return expandedParts, nil +} + +func parseComparator(s string) comparator { + switch s { + case "==": + fallthrough + case "": + fallthrough + case "=": + return compEQ + case ">": + return compGT + case ">=": + return compGE + case "<": + return compLT + case "<=": + return compLE + case "!": + fallthrough + case "!=": + return compNE + } + + return nil +} + +// MustParseRange is like ParseRange but panics if the range cannot be parsed. +func MustParseRange(s string) Range { + r, err := ParseRange(s) + if err != nil { + panic(`semver: ParseRange(` + s + `): ` + err.Error()) + } + return r +} diff --git a/vendor/github.com/blang/semver/v4/semver.go b/vendor/github.com/blang/semver/v4/semver.go new file mode 100644 index 000000000..307de610f --- /dev/null +++ b/vendor/github.com/blang/semver/v4/semver.go @@ -0,0 +1,476 @@ +package semver + +import ( + "errors" + "fmt" + "strconv" + "strings" +) + +const ( + numbers string = "0123456789" + alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + alphanum = alphas + numbers +) + +// SpecVersion is the latest fully supported spec version of semver +var SpecVersion = Version{ + Major: 2, + Minor: 0, + Patch: 0, +} + +// Version represents a semver compatible version +type Version struct { + Major uint64 + Minor uint64 + Patch uint64 + Pre []PRVersion + Build []string //No Precedence +} + +// Version to string +func (v Version) String() string { + b := make([]byte, 0, 5) + b = strconv.AppendUint(b, v.Major, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Minor, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Patch, 10) + + if len(v.Pre) > 0 { + b = append(b, '-') + b = append(b, v.Pre[0].String()...) + + for _, pre := range v.Pre[1:] { + b = append(b, '.') + b = append(b, pre.String()...) + } + } + + if len(v.Build) > 0 { + b = append(b, '+') + b = append(b, v.Build[0]...) + + for _, build := range v.Build[1:] { + b = append(b, '.') + b = append(b, build...) + } + } + + return string(b) +} + +// FinalizeVersion discards prerelease and build number and only returns +// major, minor and patch number. +func (v Version) FinalizeVersion() string { + b := make([]byte, 0, 5) + b = strconv.AppendUint(b, v.Major, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Minor, 10) + b = append(b, '.') + b = strconv.AppendUint(b, v.Patch, 10) + return string(b) +} + +// Equals checks if v is equal to o. +func (v Version) Equals(o Version) bool { + return (v.Compare(o) == 0) +} + +// EQ checks if v is equal to o. +func (v Version) EQ(o Version) bool { + return (v.Compare(o) == 0) +} + +// NE checks if v is not equal to o. +func (v Version) NE(o Version) bool { + return (v.Compare(o) != 0) +} + +// GT checks if v is greater than o. +func (v Version) GT(o Version) bool { + return (v.Compare(o) == 1) +} + +// GTE checks if v is greater than or equal to o. +func (v Version) GTE(o Version) bool { + return (v.Compare(o) >= 0) +} + +// GE checks if v is greater than or equal to o. +func (v Version) GE(o Version) bool { + return (v.Compare(o) >= 0) +} + +// LT checks if v is less than o. +func (v Version) LT(o Version) bool { + return (v.Compare(o) == -1) +} + +// LTE checks if v is less than or equal to o. 
+func (v Version) LTE(o Version) bool { + return (v.Compare(o) <= 0) +} + +// LE checks if v is less than or equal to o. +func (v Version) LE(o Version) bool { + return (v.Compare(o) <= 0) +} + +// Compare compares Versions v to o: +// -1 == v is less than o +// 0 == v is equal to o +// 1 == v is greater than o +func (v Version) Compare(o Version) int { + if v.Major != o.Major { + if v.Major > o.Major { + return 1 + } + return -1 + } + if v.Minor != o.Minor { + if v.Minor > o.Minor { + return 1 + } + return -1 + } + if v.Patch != o.Patch { + if v.Patch > o.Patch { + return 1 + } + return -1 + } + + // Quick comparison if a version has no prerelease versions + if len(v.Pre) == 0 && len(o.Pre) == 0 { + return 0 + } else if len(v.Pre) == 0 && len(o.Pre) > 0 { + return 1 + } else if len(v.Pre) > 0 && len(o.Pre) == 0 { + return -1 + } + + i := 0 + for ; i < len(v.Pre) && i < len(o.Pre); i++ { + if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 { + continue + } else if comp == 1 { + return 1 + } else { + return -1 + } + } + + // If all pr versions are the equal but one has further prversion, this one greater + if i == len(v.Pre) && i == len(o.Pre) { + return 0 + } else if i == len(v.Pre) && i < len(o.Pre) { + return -1 + } else { + return 1 + } + +} + +// IncrementPatch increments the patch version +func (v *Version) IncrementPatch() error { + v.Patch++ + return nil +} + +// IncrementMinor increments the minor version +func (v *Version) IncrementMinor() error { + v.Minor++ + v.Patch = 0 + return nil +} + +// IncrementMajor increments the major version +func (v *Version) IncrementMajor() error { + v.Major++ + v.Minor = 0 + v.Patch = 0 + return nil +} + +// Validate validates v and returns error in case +func (v Version) Validate() error { + // Major, Minor, Patch already validated using uint64 + + for _, pre := range v.Pre { + if !pre.IsNum { //Numeric prerelease versions already uint64 + if len(pre.VersionStr) == 0 { + return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr) + } + if !containsOnly(pre.VersionStr, alphanum) { + return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr) + } + } + } + + for _, build := range v.Build { + if len(build) == 0 { + return fmt.Errorf("Build meta data can not be empty %q", build) + } + if !containsOnly(build, alphanum) { + return fmt.Errorf("Invalid character(s) found in build meta data %q", build) + } + } + + return nil +} + +// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error +func New(s string) (*Version, error) { + v, err := Parse(s) + vp := &v + return vp, err +} + +// Make is an alias for Parse, parses version string and returns a validated Version or error +func Make(s string) (Version, error) { + return Parse(s) +} + +// ParseTolerant allows for certain version specifications that do not strictly adhere to semver +// specs to be parsed by this library. It does so by normalizing versions before passing them to +// Parse(). It currently trims spaces, removes a "v" prefix, adds a 0 patch number to versions +// with only major and minor components specified, and removes leading 0s. +func ParseTolerant(s string) (Version, error) { + s = strings.TrimSpace(s) + s = strings.TrimPrefix(s, "v") + + // Split into major.minor.(patch+pr+meta) + parts := strings.SplitN(s, ".", 3) + // Remove leading zeros. 
+ for i, p := range parts { + if len(p) > 1 { + p = strings.TrimLeft(p, "0") + if len(p) == 0 || !strings.ContainsAny(p[0:1], "0123456789") { + p = "0" + p + } + parts[i] = p + } + } + // Fill up shortened versions. + if len(parts) < 3 { + if strings.ContainsAny(parts[len(parts)-1], "+-") { + return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data") + } + for len(parts) < 3 { + parts = append(parts, "0") + } + } + s = strings.Join(parts, ".") + + return Parse(s) +} + +// Parse parses version string and returns a validated Version or error +func Parse(s string) (Version, error) { + if len(s) == 0 { + return Version{}, errors.New("Version string empty") + } + + // Split into major.minor.(patch+pr+meta) + parts := strings.SplitN(s, ".", 3) + if len(parts) != 3 { + return Version{}, errors.New("No Major.Minor.Patch elements found") + } + + // Major + if !containsOnly(parts[0], numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0]) + } + if hasLeadingZeroes(parts[0]) { + return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0]) + } + major, err := strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return Version{}, err + } + + // Minor + if !containsOnly(parts[1], numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1]) + } + if hasLeadingZeroes(parts[1]) { + return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1]) + } + minor, err := strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return Version{}, err + } + + v := Version{} + v.Major = major + v.Minor = minor + + var build, prerelease []string + patchStr := parts[2] + + if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 { + build = strings.Split(patchStr[buildIndex+1:], ".") + patchStr = patchStr[:buildIndex] + } + + if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 { + prerelease = strings.Split(patchStr[preIndex+1:], ".") + patchStr = patchStr[:preIndex] + } + + if !containsOnly(patchStr, numbers) { + return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr) + } + if hasLeadingZeroes(patchStr) { + return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr) + } + patch, err := strconv.ParseUint(patchStr, 10, 64) + if err != nil { + return Version{}, err + } + + v.Patch = patch + + // Prerelease + for _, prstr := range prerelease { + parsedPR, err := NewPRVersion(prstr) + if err != nil { + return Version{}, err + } + v.Pre = append(v.Pre, parsedPR) + } + + // Build meta data + for _, str := range build { + if len(str) == 0 { + return Version{}, errors.New("Build meta data is empty") + } + if !containsOnly(str, alphanum) { + return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str) + } + v.Build = append(v.Build, str) + } + + return v, nil +} + +// MustParse is like Parse but panics if the version cannot be parsed. 
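ParseTolerant's normalization rules read densely above, so a brief hedged sketch may help; it assumes the vendored blang/semver/v4 package and shows the "v" prefix being stripped and the missing patch number padded before an ordinary comparison.

    package main

    import (
    	"fmt"

    	"github.com/blang/semver/v4"
    )

    func main() {
    	v, err := semver.ParseTolerant(" v1.2 ") // normalized to "1.2.0"
    	if err != nil {
    		panic(err)
    	}
    	w := semver.MustParse("1.2.3-beta.1")
    	fmt.Println(v.String()) // 1.2.0
    	fmt.Println(v.LT(w))    // true
    }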
+func MustParse(s string) Version { + v, err := Parse(s) + if err != nil { + panic(`semver: Parse(` + s + `): ` + err.Error()) + } + return v +} + +// PRVersion represents a PreRelease Version +type PRVersion struct { + VersionStr string + VersionNum uint64 + IsNum bool +} + +// NewPRVersion creates a new valid prerelease version +func NewPRVersion(s string) (PRVersion, error) { + if len(s) == 0 { + return PRVersion{}, errors.New("Prerelease is empty") + } + v := PRVersion{} + if containsOnly(s, numbers) { + if hasLeadingZeroes(s) { + return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s) + } + num, err := strconv.ParseUint(s, 10, 64) + + // Might never be hit, but just in case + if err != nil { + return PRVersion{}, err + } + v.VersionNum = num + v.IsNum = true + } else if containsOnly(s, alphanum) { + v.VersionStr = s + v.IsNum = false + } else { + return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s) + } + return v, nil +} + +// IsNumeric checks if prerelease-version is numeric +func (v PRVersion) IsNumeric() bool { + return v.IsNum +} + +// Compare compares two PreRelease Versions v and o: +// -1 == v is less than o +// 0 == v is equal to o +// 1 == v is greater than o +func (v PRVersion) Compare(o PRVersion) int { + if v.IsNum && !o.IsNum { + return -1 + } else if !v.IsNum && o.IsNum { + return 1 + } else if v.IsNum && o.IsNum { + if v.VersionNum == o.VersionNum { + return 0 + } else if v.VersionNum > o.VersionNum { + return 1 + } else { + return -1 + } + } else { // both are Alphas + if v.VersionStr == o.VersionStr { + return 0 + } else if v.VersionStr > o.VersionStr { + return 1 + } else { + return -1 + } + } +} + +// PreRelease version to string +func (v PRVersion) String() string { + if v.IsNum { + return strconv.FormatUint(v.VersionNum, 10) + } + return v.VersionStr +} + +func containsOnly(s string, set string) bool { + return strings.IndexFunc(s, func(r rune) bool { + return !strings.ContainsRune(set, r) + }) == -1 +} + +func hasLeadingZeroes(s string) bool { + return len(s) > 1 && s[0] == '0' +} + +// NewBuildVersion creates a new valid build version +func NewBuildVersion(s string) (string, error) { + if len(s) == 0 { + return "", errors.New("Buildversion is empty") + } + if !containsOnly(s, alphanum) { + return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s) + } + return s, nil +} + +// FinalizeVersion returns the major, minor and patch number only and discards +// prerelease and build number. +func FinalizeVersion(s string) (string, error) { + v, err := Parse(s) + if err != nil { + return "", err + } + v.Pre = nil + v.Build = nil + + finalVer := v.String() + return finalVer, nil +} diff --git a/vendor/github.com/blang/semver/v4/sort.go b/vendor/github.com/blang/semver/v4/sort.go new file mode 100644 index 000000000..e18f88082 --- /dev/null +++ b/vendor/github.com/blang/semver/v4/sort.go @@ -0,0 +1,28 @@ +package semver + +import ( + "sort" +) + +// Versions represents multiple versions. 
+type Versions []Version + +// Len returns length of version collection +func (s Versions) Len() int { + return len(s) +} + +// Swap swaps two versions inside the collection by its indices +func (s Versions) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Less checks if version at index i is less than version at index j +func (s Versions) Less(i, j int) bool { + return s[i].LT(s[j]) +} + +// Sort sorts a slice of versions +func Sort(versions []Version) { + sort.Sort(Versions(versions)) +} diff --git a/vendor/github.com/blang/semver/v4/sql.go b/vendor/github.com/blang/semver/v4/sql.go new file mode 100644 index 000000000..db958134f --- /dev/null +++ b/vendor/github.com/blang/semver/v4/sql.go @@ -0,0 +1,30 @@ +package semver + +import ( + "database/sql/driver" + "fmt" +) + +// Scan implements the database/sql.Scanner interface. +func (v *Version) Scan(src interface{}) (err error) { + var str string + switch src := src.(type) { + case string: + str = src + case []byte: + str = string(src) + default: + return fmt.Errorf("version.Scan: cannot convert %T to string", src) + } + + if t, err := Parse(str); err == nil { + *v = t + } + + return +} + +// Value implements the database/sql/driver.Valuer interface. +func (v Version) Value() (driver.Value, error) { + return v.String(), nil +} diff --git a/vendor/github.com/clarketm/json/decode.go b/vendor/github.com/clarketm/json/decode.go index b43484692..a9917e72c 100644 --- a/vendor/github.com/clarketm/json/decode.go +++ b/vendor/github.com/clarketm/json/decode.go @@ -200,22 +200,22 @@ func (n Number) Int64() (int64, error) { return strconv.ParseInt(string(n), 10, 64) } +// An errorContext provides context for type errors during decoding. +type errorContext struct { + Struct reflect.Type + FieldStack []string +} + // decodeState represents the state while decoding a JSON value. type decodeState struct { - data []byte - off int // next read offset in data - opcode int // last read result - scan scanner - errorContext struct { // provides context for type errors - Struct reflect.Type - FieldStack []string - } + data []byte + off int // next read offset in data + opcode int // last read result + scan scanner + errorContext *errorContext savedError error useNumber bool disallowUnknownFields bool - // safeUnquote is the number of current string literal bytes that don't - // need to be unquoted. When negative, no bytes need unquoting. - safeUnquote int } // readIndex returns the position of the last byte read. @@ -232,10 +232,11 @@ func (d *decodeState) init(data []byte) *decodeState { d.data = data d.off = 0 d.savedError = nil - d.errorContext.Struct = nil - - // Reuse the allocated space for the FieldStack slice. - d.errorContext.FieldStack = d.errorContext.FieldStack[:0] + if d.errorContext != nil { + d.errorContext.Struct = nil + // Reuse the allocated space for the FieldStack slice. 
+ d.errorContext.FieldStack = d.errorContext.FieldStack[:0] + } return d } @@ -249,12 +250,11 @@ func (d *decodeState) saveError(err error) { // addErrorContext returns a new error enhanced with information from d.errorContext func (d *decodeState) addErrorContext(err error) error { - if d.errorContext.Struct != nil || len(d.errorContext.FieldStack) > 0 { + if d.errorContext != nil && (d.errorContext.Struct != nil || len(d.errorContext.FieldStack) > 0) { switch err := err.(type) { case *UnmarshalTypeError: err.Struct = d.errorContext.Struct.Name() err.Field = strings.Join(d.errorContext.FieldStack, ".") - return err } } return err @@ -317,27 +317,13 @@ func (d *decodeState) rescanLiteral() { Switch: switch data[i-1] { case '"': // string - // safeUnquote is initialized at -1, which means that all bytes - // checked so far can be unquoted at a later time with no work - // at all. When reaching the closing '"', if safeUnquote is - // still -1, all bytes can be unquoted with no work. Otherwise, - // only those bytes up until the first '\\' or non-ascii rune - // can be safely unquoted. - safeUnquote := -1 for ; i < len(data); i++ { - if c := data[i]; c == '\\' { - if safeUnquote < 0 { // first unsafe byte - safeUnquote = int(i - d.off) - } + switch data[i] { + case '\\': i++ // escaped char - } else if c == '"' { - d.safeUnquote = safeUnquote + case '"': i++ // tokenize the closing quote too break Switch - } else if c >= utf8.RuneSelf { - if safeUnquote < 0 { // first unsafe byte - safeUnquote = int(i - d.off) - } } } case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': // number @@ -674,7 +660,10 @@ func (d *decodeState) object(v reflect.Value) error { } var mapElem reflect.Value - origErrorContext := d.errorContext + var origErrorContext errorContext + if d.errorContext != nil { + origErrorContext = *d.errorContext + } for { // Read opening " of string key or closing }. @@ -691,7 +680,7 @@ func (d *decodeState) object(v reflect.Value) error { start := d.readIndex() d.rescanLiteral() item := d.data[start:d.readIndex()] - key, ok := d.unquoteBytes(item) + key, ok := unquoteBytes(item) if !ok { panic(phasePanicMsg) } @@ -749,6 +738,9 @@ func (d *decodeState) object(v reflect.Value) error { } subv = subv.Field(i) } + if d.errorContext == nil { + d.errorContext = new(errorContext) + } d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name) d.errorContext.Struct = t } else if d.disallowUnknownFields { @@ -829,11 +821,13 @@ func (d *decodeState) object(v reflect.Value) error { if d.opcode == scanSkipSpace { d.scanWhile(scanSkipSpace) } - // Reset errorContext to its original state. - // Keep the same underlying array for FieldStack, to reuse the - // space and avoid unnecessary allocs. - d.errorContext.FieldStack = d.errorContext.FieldStack[:len(origErrorContext.FieldStack)] - d.errorContext.Struct = origErrorContext.Struct + if d.errorContext != nil { + // Reset errorContext to its original state. + // Keep the same underlying array for FieldStack, to reuse the + // space and avoid unnecessary allocs. 
+ d.errorContext.FieldStack = d.errorContext.FieldStack[:len(origErrorContext.FieldStack)] + d.errorContext.Struct = origErrorContext.Struct + } if d.opcode == scanEndObject { break } @@ -892,7 +886,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool d.saveError(&UnmarshalTypeError{Value: val, Type: v.Type(), Offset: int64(d.readIndex())}) return nil } - s, ok := d.unquoteBytes(item) + s, ok := unquoteBytes(item) if !ok { if fromQuoted { return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) @@ -943,7 +937,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool } case '"': // string - s, ok := d.unquoteBytes(item) + s, ok := unquoteBytes(item) if !ok { if fromQuoted { return fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()) @@ -1103,7 +1097,7 @@ func (d *decodeState) objectInterface() map[string]interface{} { start := d.readIndex() d.rescanLiteral() item := d.data[start:d.readIndex()] - key, ok := d.unquote(item) + key, ok := unquote(item) if !ok { panic(phasePanicMsg) } @@ -1152,7 +1146,7 @@ func (d *decodeState) literalInterface() interface{} { return c == 't' case '"': // string - s, ok := d.unquote(item) + s, ok := unquote(item) if !ok { panic(phasePanicMsg) } @@ -1195,26 +1189,38 @@ func getu4(s []byte) rune { // unquote converts a quoted JSON string literal s into an actual string t. // The rules are different than for Go, so cannot use strconv.Unquote. -// The first byte in s must be '"'. -func (d *decodeState) unquote(s []byte) (t string, ok bool) { - s, ok = d.unquoteBytes(s) +func unquote(s []byte) (t string, ok bool) { + s, ok = unquoteBytes(s) t = string(s) return } -func (d *decodeState) unquoteBytes(s []byte) (t []byte, ok bool) { - // We already know that s[0] == '"'. However, we don't know that the - // closing quote exists in all cases, such as when the string is nested - // via the ",string" option. - if len(s) < 2 || s[len(s)-1] != '"' { +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { return } s = s[1 : len(s)-1] - // If there are no unusual characters, no unquoting is needed, so return - // a slice of the original bytes. - r := d.safeUnquote - if r == -1 { + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError && size == 1 { + break + } + r += size + } + if r == len(s) { return s, true } diff --git a/vendor/github.com/clarketm/json/encode.go b/vendor/github.com/clarketm/json/encode.go index 1b45610ab..06b2f754c 100644 --- a/vendor/github.com/clarketm/json/encode.go +++ b/vendor/github.com/clarketm/json/encode.go @@ -153,7 +153,7 @@ import ( // // JSON cannot represent cyclic data structures and Marshal does not // handle them. Passing cyclic structures to Marshal will result in -// an infinite recursion. +// an error. // func Marshal(v interface{}) ([]byte, error) { e := newEncodeState() @@ -236,6 +236,8 @@ func (e *UnsupportedTypeError) Error() string { return "json: unsupported type: " + e.Type.String() } +// An UnsupportedValueError is returned by Marshal when attempting +// to encode an unsupported value. 
type UnsupportedValueError struct { Value reflect.Value Str string @@ -285,17 +287,31 @@ var hex = "0123456789abcdef" type encodeState struct { bytes.Buffer // accumulated output scratch [64]byte + + // Keep track of what pointers we've seen in the current recursive call + // path, to avoid cycles that could lead to a stack overflow. Only do + // the relatively expensive map operations if ptrLevel is larger than + // startDetectingCyclesAfter, so that we skip the work if we're within a + // reasonable amount of nested pointers deep. + ptrLevel uint + ptrSeen map[interface{}]struct{} } +const startDetectingCyclesAfter = 1000 + var encodeStatePool sync.Pool func newEncodeState() *encodeState { if v := encodeStatePool.Get(); v != nil { e := v.(*encodeState) e.Reset() + if len(e.ptrSeen) > 0 { + panic("ptrEncoder.encode should have emptied ptrSeen via defers") + } + e.ptrLevel = 0 return e } - return new(encodeState) + return &encodeState{ptrSeen: make(map[interface{}]struct{})} } // jsonError is an error wrapper type for internal use only. @@ -632,11 +648,12 @@ func stringEncoder(e *encodeState, v reflect.Value, opts encOpts) { return } if opts.quoted { - b := make([]byte, 0, v.Len()+2) - b = append(b, '"') - b = append(b, []byte(v.String())...) - b = append(b, '"') - e.stringBytes(b, opts.escapeHTML) + e2 := newEncodeState() + // Since we encode the string twice, we only need to escape HTML + // the first time. + e2.string(v.String(), opts.escapeHTML) + e.stringBytes(e2.Bytes(), false) + encodeStatePool.Put(e2) } else { e.string(v.String(), opts.escapeHTML) } @@ -646,7 +663,7 @@ func stringEncoder(e *encodeState, v reflect.Value, opts encOpts) { func isValidNumber(s string) bool { // This function implements the JSON numbers grammar. // See https://tools.ietf.org/html/rfc7159#section-6 - // and https://json.org/number.gif + // and https://www.json.org/img/number.png if s == "" { return false @@ -775,28 +792,40 @@ func (me mapEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { e.WriteString("null") return } + if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter { + // We're a large number of nested ptrEncoder.encode calls deep; + // start checking if we've run into a pointer cycle. + ptr := v.Pointer() + if _, ok := e.ptrSeen[ptr]; ok { + e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())}) + } + e.ptrSeen[ptr] = struct{}{} + defer delete(e.ptrSeen, ptr) + } e.WriteByte('{') // Extract and sort the keys. 
- keys := v.MapKeys() - sv := make([]reflectWithString, len(keys)) - for i, v := range keys { - sv[i].v = v + sv := make([]reflectWithString, v.Len()) + mi := v.MapRange() + for i := 0; mi.Next(); i++ { + sv[i].k = mi.Key() + sv[i].v = mi.Value() if err := sv[i].resolve(); err != nil { e.error(fmt.Errorf("json: encoding error for type %q: %q", v.Type().String(), err.Error())) } } - sort.Slice(sv, func(i, j int) bool { return sv[i].s < sv[j].s }) + sort.Slice(sv, func(i, j int) bool { return sv[i].ks < sv[j].ks }) for i, kv := range sv { if i > 0 { e.WriteByte(',') } - e.string(kv.s, opts.escapeHTML) + e.string(kv.ks, opts.escapeHTML) e.WriteByte(':') - me.elemEnc(e, v.MapIndex(kv.v), opts) + me.elemEnc(e, kv.v, opts) } e.WriteByte('}') + e.ptrLevel-- } func newMapEncoder(t reflect.Type) encoderFunc { @@ -853,7 +882,23 @@ func (se sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { e.WriteString("null") return } + if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter { + // We're a large number of nested ptrEncoder.encode calls deep; + // start checking if we've run into a pointer cycle. + // Here we use a struct to memorize the pointer to the first element of the slice + // and its length. + ptr := struct { + ptr uintptr + len int + }{v.Pointer(), v.Len()} + if _, ok := e.ptrSeen[ptr]; ok { + e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())}) + } + e.ptrSeen[ptr] = struct{}{} + defer delete(e.ptrSeen, ptr) + } se.arrayEnc(e, v, opts) + e.ptrLevel-- } func newSliceEncoder(t reflect.Type) encoderFunc { @@ -898,7 +943,18 @@ func (pe ptrEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { e.WriteString("null") return } + if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter { + // We're a large number of nested ptrEncoder.encode calls deep; + // start checking if we've run into a pointer cycle. + ptr := v.Interface() + if _, ok := e.ptrSeen[ptr]; ok { + e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())}) + } + e.ptrSeen[ptr] = struct{}{} + defer delete(e.ptrSeen, ptr) + } pe.elemEnc(e, v.Elem(), opts) + e.ptrLevel-- } func newPtrEncoder(t reflect.Type) encoderFunc { @@ -931,7 +987,7 @@ func isValidTag(s string) bool { } for _, c := range s { switch { - case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + case strings.ContainsRune("!#$%&()*+-./:;<=>?@[]^_{|}~ ", c): // Backslash and quote chars are reserved, but // otherwise any punctuation chars are allowed // in a tag name. 
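The ptrSeen/ptrLevel machinery added in these hunks is what turns a pointer cycle from a stack overflow into an ordinary error, matching the updated Marshal doc comment. A minimal sketch, assuming the vendored clarketm/json import path:

    package main

    import (
    	"fmt"

    	"github.com/clarketm/json"
    )

    type node struct {
    	Next *node `json:"next"`
    }

    func main() {
    	n := &node{}
    	n.Next = n // self-referential pointer cycle
    	_, err := json.Marshal(n)
    	// Cycle detection kicks in past ~1000 nested pointers and reports:
    	// json: unsupported value: encountered a cycle via *main.node
    	fmt.Println(err)
    }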
@@ -953,29 +1009,30 @@ func typeByIndex(t reflect.Type, index []int) reflect.Type { } type reflectWithString struct { - v reflect.Value - s string + k reflect.Value + v reflect.Value + ks string } func (w *reflectWithString) resolve() error { - if w.v.Kind() == reflect.String { - w.s = w.v.String() + if w.k.Kind() == reflect.String { + w.ks = w.k.String() return nil } - if tm, ok := w.v.Interface().(encoding.TextMarshaler); ok { - if w.v.Kind() == reflect.Ptr && w.v.IsNil() { + if tm, ok := w.k.Interface().(encoding.TextMarshaler); ok { + if w.k.Kind() == reflect.Ptr && w.k.IsNil() { return nil } buf, err := tm.MarshalText() - w.s = string(buf) + w.ks = string(buf) return err } - switch w.v.Kind() { + switch w.k.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - w.s = strconv.FormatInt(w.v.Int(), 10) + w.ks = strconv.FormatInt(w.k.Int(), 10) return nil case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - w.s = strconv.FormatUint(w.v.Uint(), 10) + w.ks = strconv.FormatUint(w.k.Uint(), 10) return nil } panic("unexpected map key type") @@ -1195,19 +1252,18 @@ func typeFields(t reflect.Type) structFields { // Scan f.typ for fields to include. for i := 0; i < f.typ.NumField(); i++ { sf := f.typ.Field(i) - isUnexported := sf.PkgPath != "" if sf.Anonymous { t := sf.Type if t.Kind() == reflect.Ptr { t = t.Elem() } - if isUnexported && t.Kind() != reflect.Struct { + if !sf.IsExported() && t.Kind() != reflect.Struct { // Ignore embedded fields of unexported non-struct types. continue } // Do not ignore embedded fields of unexported struct types // since they may have exported fields. - } else if isUnexported { + } else if !sf.IsExported() { // Ignore unexported non-embedded fields. continue } diff --git a/vendor/github.com/clarketm/json/fuzz.go b/vendor/github.com/clarketm/json/fuzz.go index be03f0d7f..d3fa2d111 100644 --- a/vendor/github.com/clarketm/json/fuzz.go +++ b/vendor/github.com/clarketm/json/fuzz.go @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +//go:build gofuzz // +build gofuzz package json diff --git a/vendor/github.com/clarketm/json/scanner.go b/vendor/github.com/clarketm/json/scanner.go index 552bd7036..9dc1903e2 100644 --- a/vendor/github.com/clarketm/json/scanner.go +++ b/vendor/github.com/clarketm/json/scanner.go @@ -139,6 +139,10 @@ const ( parseArrayValue // parsing array value ) +// This limits the max nesting depth to prevent stack overflow. +// This is permitted by https://tools.ietf.org/html/rfc7159#section-9 +const maxNestingDepth = 10000 + // reset prepares the scanner for use. // It must be called before calling s.step. func (s *scanner) reset() { @@ -168,8 +172,13 @@ func (s *scanner) eof() int { } // pushParseState pushes a new parse state p onto the parse stack. -func (s *scanner) pushParseState(p int) { - s.parseState = append(s.parseState, p) +// an error state is returned if maxNestingDepth was exceeded, otherwise successState is returned. 
+func (s *scanner) pushParseState(c byte, newParseState int, successState int) int { + s.parseState = append(s.parseState, newParseState) + if len(s.parseState) <= maxNestingDepth { + return successState + } + return s.error(c, "exceeded max depth") } // popParseState pops a parse state (already obtained) off the stack @@ -186,12 +195,12 @@ func (s *scanner) popParseState() { } func isSpace(c byte) bool { - return c == ' ' || c == '\t' || c == '\r' || c == '\n' + return c <= ' ' && (c == ' ' || c == '\t' || c == '\r' || c == '\n') } // stateBeginValueOrEmpty is the state after reading `[`. func stateBeginValueOrEmpty(s *scanner, c byte) int { - if c <= ' ' && isSpace(c) { + if isSpace(c) { return scanSkipSpace } if c == ']' { @@ -202,18 +211,16 @@ func stateBeginValueOrEmpty(s *scanner, c byte) int { // stateBeginValue is the state at the beginning of the input. func stateBeginValue(s *scanner, c byte) int { - if c <= ' ' && isSpace(c) { + if isSpace(c) { return scanSkipSpace } switch c { case '{': s.step = stateBeginStringOrEmpty - s.pushParseState(parseObjectKey) - return scanBeginObject + return s.pushParseState(c, parseObjectKey, scanBeginObject) case '[': s.step = stateBeginValueOrEmpty - s.pushParseState(parseArrayValue) - return scanBeginArray + return s.pushParseState(c, parseArrayValue, scanBeginArray) case '"': s.step = stateInString return scanBeginLiteral @@ -242,7 +249,7 @@ func stateBeginValue(s *scanner, c byte) int { // stateBeginStringOrEmpty is the state after reading `{`. func stateBeginStringOrEmpty(s *scanner, c byte) int { - if c <= ' ' && isSpace(c) { + if isSpace(c) { return scanSkipSpace } if c == '}' { @@ -255,7 +262,7 @@ func stateBeginStringOrEmpty(s *scanner, c byte) int { // stateBeginString is the state after reading `{"key": value,`. func stateBeginString(s *scanner, c byte) int { - if c <= ' ' && isSpace(c) { + if isSpace(c) { return scanSkipSpace } if c == '"' { @@ -275,7 +282,7 @@ func stateEndValue(s *scanner, c byte) int { s.endTop = true return stateEndTop(s, c) } - if c <= ' ' && isSpace(c) { + if isSpace(c) { s.step = stateEndValue return scanSkipSpace } diff --git a/vendor/github.com/coreos/go-json/README.md b/vendor/github.com/coreos/go-json/README.md index 800f9c1e3..ee0dafec1 100644 --- a/vendor/github.com/coreos/go-json/README.md +++ b/vendor/github.com/coreos/go-json/README.md @@ -3,15 +3,15 @@ This is a fork of Go's `encoding/json` library. It adds a third target for unmarshalling, `json.Node`. -Unmarshalling to a `Node` behaves similarly to unmarshalling to an -`interface{}`, except that it also records the offsets for the start and end +Unmarshalling to a `Node` behaves similarly to unmarshalling to +`any`, except that it also records the offsets for the start and end of the value that is unmarshalled and, if the value is part of a JSON object, the offsets of the start and end of the object's key. 
The `Value` -field of the `Node` is unmarshalled to the same type as if it were an -`interface{}`, except in the case of arrays and objects: +field of the `Node` is unmarshalled to the same type as if it were +`any`, except in the case of arrays and objects: -| JSON type | Go type, unmarshalled to `interface{}` | `Node.Value` type | -| --------- | -------------------------------------- | ----------------- | -| Array | `[]interface{}` | `[]Node` | -| Object | `map[string]interface{}` | `map[string]Node` | -| Other | `interface{}` | `interface{}` | +| JSON type | Go type, unmarshalled to `any` | `Node.Value` type | +| --------- | ------------------------------ | ----------------- | +| Array | `[]any` | `[]Node` | +| Object | `map[string]any` | `map[string]Node` | +| Other | `any` | `any` | diff --git a/vendor/github.com/coreos/go-json/decode.go b/vendor/github.com/coreos/go-json/decode.go index 1966dcdf4..ae0730936 100644 --- a/vendor/github.com/coreos/go-json/decode.go +++ b/vendor/github.com/coreos/go-json/decode.go @@ -93,7 +93,7 @@ import ( // Instead, they are replaced by the Unicode replacement // character U+FFFD. // -func Unmarshal(data []byte, v interface{}) error { +func Unmarshal(data []byte, v any) error { // Check for well-formedness. // Avoids filling out half a data structure // before discovering a JSON syntax error. @@ -161,7 +161,7 @@ func (e *InvalidUnmarshalError) Error() string { return "json: Unmarshal(nil)" } - if e.Type.Kind() != reflect.Ptr { + if e.Type.Kind() != reflect.Pointer { return "json: Unmarshal(non-pointer " + e.Type.String() + ")" } return "json: Unmarshal(nil " + e.Type.String() + ")" @@ -172,21 +172,21 @@ type Node struct { End int KeyStart int // Only value if a member of a struct KeyEnd int - Value interface{} + Value any } // Recursively disintermediate any Nodes in the argument. -func unwrapNode(in interface{}) interface{} { +func unwrapNode(in any) any { if node, ok := in.(Node); ok { return unwrapNode(node.Value) } else if arr, ok := in.([]Node); ok { - ret := make([]interface{}, len(arr)) + ret := make([]any, len(arr)) for i, ent := range arr { ret[i] = unwrapNode(ent.Value) } return ret } else if obj, ok := in.(map[string]Node); ok { - ret := make(map[string]interface{}) + ret := make(map[string]any) for k, v := range obj { ret[k] = unwrapNode(v.Value) } @@ -196,9 +196,9 @@ func unwrapNode(in interface{}) interface{} { } } -func (d *decodeState) unmarshal(v interface{}) error { +func (d *decodeState) unmarshal(v any) error { rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr || rv.IsNil() { + if rv.Kind() != reflect.Pointer || rv.IsNil() { return &InvalidUnmarshalError{reflect.TypeOf(v)} } @@ -229,16 +229,19 @@ func (n Number) Int64() (int64, error) { return strconv.ParseInt(string(n), 10, 64) } +// An errorContext provides context for type errors during decoding. +type errorContext struct { + Struct reflect.Type + FieldStack []string +} + // decodeState represents the state while decoding a JSON value. 
type decodeState struct { - data []byte - off int // next read offset in data - opcode int // last read result - scan scanner - errorContext struct { // provides context for type errors - Struct reflect.Type - FieldStack []string - } + data []byte + off int // next read offset in data + opcode int // last read result + scan scanner + errorContext *errorContext savedError error useNumber bool disallowUnknownFields bool @@ -258,10 +261,11 @@ func (d *decodeState) init(data []byte) *decodeState { d.data = data d.off = 0 d.savedError = nil - d.errorContext.Struct = nil - - // Reuse the allocated space for the FieldStack slice. - d.errorContext.FieldStack = d.errorContext.FieldStack[:0] + if d.errorContext != nil { + d.errorContext.Struct = nil + // Reuse the allocated space for the FieldStack slice. + d.errorContext.FieldStack = d.errorContext.FieldStack[:0] + } return d } @@ -275,12 +279,11 @@ func (d *decodeState) saveError(err error) { // addErrorContext returns a new error enhanced with information from d.errorContext func (d *decodeState) addErrorContext(err error) error { - if d.errorContext.Struct != nil || len(d.errorContext.FieldStack) > 0 { + if d.errorContext != nil && (d.errorContext.Struct != nil || len(d.errorContext.FieldStack) > 0) { switch err := err.(type) { case *UnmarshalTypeError: err.Struct = d.errorContext.Struct.Name() err.Field = strings.Join(d.errorContext.FieldStack, ".") - return err } } return err @@ -424,7 +427,7 @@ type unquotedValue struct{} // quoted string literal or literal null into an interface value. // If it finds anything other than a quoted string literal or null, // valueQuoted returns unquotedValue{}. -func (d *decodeState) valueQuoted() interface{} { +func (d *decodeState) valueQuoted() any { switch d.opcode { default: panic(phasePanicMsg) @@ -466,7 +469,7 @@ func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnm // If v is a named type and is addressable, // start with its address, so that if the type has pointer methods, // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + if v.Kind() != reflect.Pointer && v.Type().Name() != "" && v.CanAddr() { haveAddr = true v = v.Addr() } @@ -475,14 +478,14 @@ func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnm // usefully addressable. 
if v.Kind() == reflect.Interface && !v.IsNil() { e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + if e.Kind() == reflect.Pointer && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Pointer) { haveAddr = false v = e continue } } - if v.Kind() != reflect.Ptr { + if v.Kind() != reflect.Pointer { break } @@ -491,7 +494,7 @@ func indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnm } // Prevent infinite loop if v is an interface pointing to its own address: - // var v interface{} + // var v any // v = &v if v.Elem().Kind() == reflect.Interface && v.Elem().Elem() == v { v = v.Elem() @@ -676,7 +679,7 @@ func (d *decodeState) object(v reflect.Value) error { reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: default: - if !reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) { + if !reflect.PointerTo(t.Key()).Implements(textUnmarshalerType) { d.saveError(&UnmarshalTypeError{Value: "object", Type: t, Offset: int64(d.off)}) d.skip() return nil @@ -695,7 +698,10 @@ func (d *decodeState) object(v reflect.Value) error { } var mapElem reflect.Value - origErrorContext := d.errorContext + var origErrorContext errorContext + if d.errorContext != nil { + origErrorContext = *d.errorContext + } for { // Read opening " of string key or closing }. @@ -749,7 +755,7 @@ func (d *decodeState) object(v reflect.Value) error { subv = v destring = f.quoted for _, i := range f.index { - if subv.Kind() == reflect.Ptr { + if subv.Kind() == reflect.Pointer { if subv.IsNil() { // If a struct embeds a pointer to an unexported type, // it is not possible to set a newly allocated value @@ -770,6 +776,9 @@ func (d *decodeState) object(v reflect.Value) error { } subv = subv.Field(i) } + if d.errorContext == nil { + d.errorContext = new(errorContext) + } d.errorContext.FieldStack = append(d.errorContext.FieldStack, f.name) d.errorContext.Struct = t } else if d.disallowUnknownFields { @@ -811,7 +820,7 @@ func (d *decodeState) object(v reflect.Value) error { kt := t.Key() var kv reflect.Value switch { - case reflect.PtrTo(kt).Implements(textUnmarshalerType): + case reflect.PointerTo(kt).Implements(textUnmarshalerType): kv = reflect.New(kt) if err := d.literalStore(item, kv, true); err != nil { return err @@ -850,11 +859,13 @@ func (d *decodeState) object(v reflect.Value) error { if d.opcode == scanSkipSpace { d.scanWhile(scanSkipSpace) } - // Reset errorContext to its original state. - // Keep the same underlying array for FieldStack, to reuse the - // space and avoid unnecessary allocs. - d.errorContext.FieldStack = d.errorContext.FieldStack[:len(origErrorContext.FieldStack)] - d.errorContext.Struct = origErrorContext.Struct + if d.errorContext != nil { + // Reset errorContext to its original state. + // Keep the same underlying array for FieldStack, to reuse the + // space and avoid unnecessary allocs. + d.errorContext.FieldStack = d.errorContext.FieldStack[:len(origErrorContext.FieldStack)] + d.errorContext.Struct = origErrorContext.Struct + } if d.opcode == scanEndObject { break } @@ -867,7 +878,7 @@ func (d *decodeState) object(v reflect.Value) error { // convertNumber converts the number literal s to a float64 or a Number // depending on the setting of d.useNumber. 
-func (d *decodeState) convertNumber(s string) (interface{}, error) { +func (d *decodeState) convertNumber(s string) (any, error) { if d.useNumber { return Number(s), nil } @@ -934,7 +945,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool break } switch v.Kind() { - case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + case reflect.Interface, reflect.Pointer, reflect.Map, reflect.Slice: v.Set(reflect.Zero(v.Type())) // otherwise, ignore null for primitives/string } @@ -1064,7 +1075,7 @@ func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool // but they avoid the weight of reflection in this common case. // valueInterface is like value but returns interface{} -func (d *decodeState) valueInterface() interface{} { +func (d *decodeState) valueInterface() any { return unwrapNode(d.valueNode()) } @@ -1087,8 +1098,8 @@ func (d *decodeState) valueNode() (val Node) { } // arrayInterface is like array but returns []interface{}. -func (d *decodeState) arrayInterface() []interface{} { - return unwrapNode(d.arrayNode()).([]interface{}) +func (d *decodeState) arrayInterface() []any { + return unwrapNode(d.arrayNode()).([]any) } // arrayNode is like arrayInterface but returns Node. @@ -1124,8 +1135,8 @@ func (d *decodeState) arrayNode() Node { } // objectInterface is like object but returns map[string]interface{}. -func (d *decodeState) objectInterface() map[string]interface{} { - return unwrapNode(d.objectNode()).(map[string]interface{}) +func (d *decodeState) objectInterface() map[string]any { + return unwrapNode(d.objectNode()).(map[string]any) } // objectNode is like object but returns Node. @@ -1189,7 +1200,7 @@ func (d *decodeState) objectNode() Node { // literalInterface consumes and returns a literal from d.data[d.off-1:] and // it reads the following byte ahead. The first byte of the literal has been // read already (that's how the caller knows it's a literal). -func (d *decodeState) literalInterface() interface{} { +func (d *decodeState) literalInterface() any { // All bytes inside literal return scanContinue op code. start := d.readIndex() d.rescanLiteral() diff --git a/vendor/github.com/coreos/go-json/encode.go b/vendor/github.com/coreos/go-json/encode.go index 578d55110..1f5e3e446 100644 --- a/vendor/github.com/coreos/go-json/encode.go +++ b/vendor/github.com/coreos/go-json/encode.go @@ -155,7 +155,7 @@ import ( // handle them. Passing cyclic structures to Marshal will result in // an error. // -func Marshal(v interface{}) ([]byte, error) { +func Marshal(v any) ([]byte, error) { e := newEncodeState() err := e.marshal(v, encOpts{escapeHTML: true}) @@ -172,7 +172,7 @@ func Marshal(v interface{}) ([]byte, error) { // MarshalIndent is like Marshal but applies Indent to format the output. // Each JSON element in the output will begin on a new line beginning with prefix // followed by one or more copies of indent according to the indentation nesting. -func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { +func MarshalIndent(v any, prefix, indent string) ([]byte, error) { b, err := Marshal(v) if err != nil { return nil, err @@ -236,6 +236,8 @@ func (e *UnsupportedTypeError) Error() string { return "json: unsupported type: " + e.Type.String() } +// An UnsupportedValueError is returned by Marshal when attempting +// to encode an unsupported value. 
type UnsupportedValueError struct { Value reflect.Value Str string @@ -292,7 +294,7 @@ type encodeState struct { // startDetectingCyclesAfter, so that we skip the work if we're within a // reasonable amount of nested pointers deep. ptrLevel uint - ptrSeen map[interface{}]struct{} + ptrSeen map[any]struct{} } const startDetectingCyclesAfter = 1000 @@ -309,7 +311,7 @@ func newEncodeState() *encodeState { e.ptrLevel = 0 return e } - return &encodeState{ptrSeen: make(map[interface{}]struct{})} + return &encodeState{ptrSeen: make(map[any]struct{})} } // jsonError is an error wrapper type for internal use only. @@ -317,7 +319,7 @@ func newEncodeState() *encodeState { // can distinguish intentional panics from this package. type jsonError struct{ error } -func (e *encodeState) marshal(v interface{}, opts encOpts) (err error) { +func (e *encodeState) marshal(v any, opts encOpts) (err error) { defer func() { if r := recover(); r != nil { if je, ok := r.(jsonError); ok { @@ -348,7 +350,7 @@ func isEmptyValue(v reflect.Value) bool { return v.Uint() == 0 case reflect.Float32, reflect.Float64: return v.Float() == 0 - case reflect.Interface, reflect.Ptr: + case reflect.Interface, reflect.Pointer: return v.IsNil() } return false @@ -417,13 +419,13 @@ func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc { // Marshaler with a value receiver, then we're better off taking // the address of the value - otherwise we end up with an // allocation as we cast the value to an interface. - if t.Kind() != reflect.Ptr && allowAddr && reflect.PtrTo(t).Implements(marshalerType) { + if t.Kind() != reflect.Pointer && allowAddr && reflect.PointerTo(t).Implements(marshalerType) { return newCondAddrEncoder(addrMarshalerEncoder, newTypeEncoder(t, false)) } if t.Implements(marshalerType) { return marshalerEncoder } - if t.Kind() != reflect.Ptr && allowAddr && reflect.PtrTo(t).Implements(textMarshalerType) { + if t.Kind() != reflect.Pointer && allowAddr && reflect.PointerTo(t).Implements(textMarshalerType) { return newCondAddrEncoder(addrTextMarshalerEncoder, newTypeEncoder(t, false)) } if t.Implements(textMarshalerType) { @@ -453,7 +455,7 @@ func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc { return newSliceEncoder(t) case reflect.Array: return newArrayEncoder(t) - case reflect.Ptr: + case reflect.Pointer: return newPtrEncoder(t) default: return unsupportedTypeEncoder @@ -465,7 +467,7 @@ func invalidValueEncoder(e *encodeState, v reflect.Value, _ encOpts) { } func marshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) { - if v.Kind() == reflect.Ptr && v.IsNil() { + if v.Kind() == reflect.Pointer && v.IsNil() { e.WriteString("null") return } @@ -502,7 +504,7 @@ func addrMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) { } func textMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) { - if v.Kind() == reflect.Ptr && v.IsNil() { + if v.Kind() == reflect.Pointer && v.IsNil() { e.WriteString("null") return } @@ -736,7 +738,7 @@ FieldLoop: // Find the nested struct field by following f.index. fv := v for _, i := range f.index { - if fv.Kind() == reflect.Ptr { + if fv.Kind() == reflect.Pointer { if fv.IsNil() { continue FieldLoop } @@ -779,28 +781,40 @@ func (me mapEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { e.WriteString("null") return } + if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter { + // We're a large number of nested ptrEncoder.encode calls deep; + // start checking if we've run into a pointer cycle. 
+ ptr := v.Pointer() + if _, ok := e.ptrSeen[ptr]; ok { + e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())}) + } + e.ptrSeen[ptr] = struct{}{} + defer delete(e.ptrSeen, ptr) + } e.WriteByte('{') // Extract and sort the keys. - keys := v.MapKeys() - sv := make([]reflectWithString, len(keys)) - for i, v := range keys { - sv[i].v = v + sv := make([]reflectWithString, v.Len()) + mi := v.MapRange() + for i := 0; mi.Next(); i++ { + sv[i].k = mi.Key() + sv[i].v = mi.Value() if err := sv[i].resolve(); err != nil { e.error(fmt.Errorf("json: encoding error for type %q: %q", v.Type().String(), err.Error())) } } - sort.Slice(sv, func(i, j int) bool { return sv[i].s < sv[j].s }) + sort.Slice(sv, func(i, j int) bool { return sv[i].ks < sv[j].ks }) for i, kv := range sv { if i > 0 { e.WriteByte(',') } - e.string(kv.s, opts.escapeHTML) + e.string(kv.ks, opts.escapeHTML) e.WriteByte(':') - me.elemEnc(e, v.MapIndex(kv.v), opts) + me.elemEnc(e, kv.v, opts) } e.WriteByte('}') + e.ptrLevel-- } func newMapEncoder(t reflect.Type) encoderFunc { @@ -857,13 +871,29 @@ func (se sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { e.WriteString("null") return } + if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter { + // We're a large number of nested ptrEncoder.encode calls deep; + // start checking if we've run into a pointer cycle. + // Here we use a struct to memorize the pointer to the first element of the slice + // and its length. + ptr := struct { + ptr uintptr + len int + }{v.Pointer(), v.Len()} + if _, ok := e.ptrSeen[ptr]; ok { + e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())}) + } + e.ptrSeen[ptr] = struct{}{} + defer delete(e.ptrSeen, ptr) + } se.arrayEnc(e, v, opts) + e.ptrLevel-- } func newSliceEncoder(t reflect.Type) encoderFunc { // Byte slices get special treatment; arrays don't. if t.Elem().Kind() == reflect.Uint8 { - p := reflect.PtrTo(t.Elem()) + p := reflect.PointerTo(t.Elem()) if !p.Implements(marshalerType) && !p.Implements(textMarshalerType) { return encodeByteSlice } @@ -946,7 +976,7 @@ func isValidTag(s string) bool { } for _, c := range s { switch { - case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + case strings.ContainsRune("!#$%&()*+-./:;<=>?@[]^_{|}~ ", c): // Backslash and quote chars are reserved, but // otherwise any punctuation chars are allowed // in a tag name. 
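For context on this fork's distinguishing feature, the json.Node unmarshal target described in its README earlier in the patch, here is a hedged sketch; the Value layout follows the README's table, and the offset fields are taken from the Node struct shown in decode.go, so treat the exact output as an assumption.

    package main

    import (
    	"fmt"

    	json "github.com/coreos/go-json"
    )

    func main() {
    	var n json.Node
    	if err := json.Unmarshal([]byte(`{"a": [1, 2]}`), &n); err != nil {
    		panic(err)
    	}
    	obj := n.Value.(map[string]json.Node) // objects decode to map[string]Node
    	a := obj["a"]
    	fmt.Println(a.KeyStart, a.KeyEnd)  // byte offsets of the "a" key
    	fmt.Println(a.Value.([]json.Node)) // arrays decode to []Node
    }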
@@ -959,7 +989,7 @@ func isValidTag(s string) bool { func typeByIndex(t reflect.Type, index []int) reflect.Type { for _, i := range index { - if t.Kind() == reflect.Ptr { + if t.Kind() == reflect.Pointer { t = t.Elem() } t = t.Field(i).Type @@ -968,29 +998,30 @@ func typeByIndex(t reflect.Type, index []int) reflect.Type { } type reflectWithString struct { - v reflect.Value - s string + k reflect.Value + v reflect.Value + ks string } func (w *reflectWithString) resolve() error { - if w.v.Kind() == reflect.String { - w.s = w.v.String() + if w.k.Kind() == reflect.String { + w.ks = w.k.String() return nil } - if tm, ok := w.v.Interface().(encoding.TextMarshaler); ok { - if w.v.Kind() == reflect.Ptr && w.v.IsNil() { + if tm, ok := w.k.Interface().(encoding.TextMarshaler); ok { + if w.k.Kind() == reflect.Pointer && w.k.IsNil() { return nil } buf, err := tm.MarshalText() - w.s = string(buf) + w.ks = string(buf) return err } - switch w.v.Kind() { + switch w.k.Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - w.s = strconv.FormatInt(w.v.Int(), 10) + w.ks = strconv.FormatInt(w.k.Int(), 10) return nil case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - w.s = strconv.FormatUint(w.v.Uint(), 10) + w.ks = strconv.FormatUint(w.k.Uint(), 10) return nil } panic("unexpected map key type") @@ -1210,19 +1241,18 @@ func typeFields(t reflect.Type) structFields { // Scan f.typ for fields to include. for i := 0; i < f.typ.NumField(); i++ { sf := f.typ.Field(i) - isUnexported := sf.PkgPath != "" if sf.Anonymous { t := sf.Type - if t.Kind() == reflect.Ptr { + if t.Kind() == reflect.Pointer { t = t.Elem() } - if isUnexported && t.Kind() != reflect.Struct { + if !sf.IsExported() && t.Kind() != reflect.Struct { // Ignore embedded fields of unexported non-struct types. continue } // Do not ignore embedded fields of unexported struct types // since they may have exported fields. - } else if isUnexported { + } else if !sf.IsExported() { // Ignore unexported non-embedded fields. continue } @@ -1239,7 +1269,7 @@ func typeFields(t reflect.Type) structFields { index[len(f.index)] = i ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { + if ft.Name() == "" && ft.Kind() == reflect.Pointer { // Follow pointer. ft = ft.Elem() } diff --git a/vendor/github.com/coreos/go-json/fuzz.go b/vendor/github.com/coreos/go-json/fuzz.go index be03f0d7f..b8f4ff2c1 100644 --- a/vendor/github.com/coreos/go-json/fuzz.go +++ b/vendor/github.com/coreos/go-json/fuzz.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
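The change below replaces the legacy build-tag comment with the //go:build form introduced in Go 1.17. The two spellings express the same constraint; gofmt would normally keep both lines for older toolchains, but this fork keeps only the modern one:

//go:build gofuzz
// +build gofuzz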
-// +build gofuzz +//go:build gofuzz package json @@ -11,10 +11,10 @@ import ( ) func Fuzz(data []byte) (score int) { - for _, ctor := range []func() interface{}{ - func() interface{} { return new(interface{}) }, - func() interface{} { return new(map[string]interface{}) }, - func() interface{} { return new([]interface{}) }, + for _, ctor := range []func() any{ + func() any { return new(any) }, + func() any { return new(map[string]any) }, + func() any { return new([]any) }, } { v := ctor() err := Unmarshal(data, v) diff --git a/vendor/github.com/coreos/go-json/scanner.go b/vendor/github.com/coreos/go-json/scanner.go index 9dc1903e2..dbaa821be 100644 --- a/vendor/github.com/coreos/go-json/scanner.go +++ b/vendor/github.com/coreos/go-json/scanner.go @@ -83,7 +83,7 @@ type scanner struct { } var scannerPool = sync.Pool{ - New: func() interface{} { + New: func() any { return &scanner{} }, } diff --git a/vendor/github.com/coreos/go-json/stream.go b/vendor/github.com/coreos/go-json/stream.go index 81f404f42..6362170d5 100644 --- a/vendor/github.com/coreos/go-json/stream.go +++ b/vendor/github.com/coreos/go-json/stream.go @@ -46,7 +46,7 @@ func (dec *Decoder) DisallowUnknownFields() { dec.d.disallowUnknownFields = true // // See the documentation for Unmarshal for details about // the conversion of JSON into a Go value. -func (dec *Decoder) Decode(v interface{}) error { +func (dec *Decoder) Decode(v any) error { if dec.err != nil { return dec.err } @@ -198,7 +198,7 @@ func NewEncoder(w io.Writer) *Encoder { // // See the documentation for Marshal for details about the // conversion of Go values to JSON. -func (enc *Encoder) Encode(v interface{}) error { +func (enc *Encoder) Encode(v any) error { if enc.err != nil { return enc.err } @@ -288,7 +288,7 @@ var _ Unmarshaler = (*RawMessage)(nil) // string, for JSON string literals // nil, for JSON null // -type Token interface{} +type Token any const ( tokenTopValue = iota @@ -452,7 +452,7 @@ func (dec *Decoder) Token() (Token, error) { if !dec.tokenValueAllowed() { return dec.tokenError(c) } - var x interface{} + var x any if err := dec.Decode(&x); err != nil { return nil, err } diff --git a/vendor/github.com/coreos/go-json/tags.go b/vendor/github.com/coreos/go-json/tags.go index c38fd5102..b490328f4 100644 --- a/vendor/github.com/coreos/go-json/tags.go +++ b/vendor/github.com/coreos/go-json/tags.go @@ -15,10 +15,8 @@ type tagOptions string // parseTag splits a struct field's json tag into its name and // comma-separated options. 
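The parseTag rewrite below (and the tagOptions.Contains rewrite after it) leans on strings.Cut, added in Go 1.18, which splits a string around the first occurrence of a separator and reports whether it was found. A small sketch of the equivalence:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Separator present: everything before the first comma, the rest after.
	name, opts, found := strings.Cut("field,omitempty,string", ",")
	fmt.Println(name, opts, found) // field omitempty,string true

	// Separator absent: the whole input, an empty remainder, found == false.
	name, opts, found = strings.Cut("field", ",")
	fmt.Printf("%q %q %v\n", name, opts, found) // "field" "" false
}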
func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") + tag, opt, _ := strings.Cut(tag, ",") + return tag, tagOptions(opt) } // Contains reports whether a comma-separated list of options @@ -30,15 +28,11 @@ func (o tagOptions) Contains(optionName string) bool { } s := string(o) for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { + var name string + name, s, _ = strings.Cut(s, ",") + if name == optionName { return true } - s = next } return false } diff --git a/vendor/github.com/coreos/ign-converter/translate/v23tov30/v23tov30.go b/vendor/github.com/coreos/ign-converter/translate/v23tov30/v23tov30.go index dd9ea70d1..fc7d89c9b 100644 --- a/vendor/github.com/coreos/ign-converter/translate/v23tov30/v23tov30.go +++ b/vendor/github.com/coreos/ign-converter/translate/v23tov30/v23tov30.go @@ -68,7 +68,7 @@ func Check2_3(cfg old.Config, fsMap map[string]string) error { pathString := path.Join("/", fsMap[file.Filesystem], file.Path) name := fmt.Sprintf("File: %s", pathString) if duplicate, isDup := entryMap[pathString]; isDup { - return util.DuplicateInodeError{duplicate, name} + return util.DuplicateInodeError{Old: duplicate, New: name} } if l := util.CheckPathUsesLink(links, pathString); l != "" { return &util.UsesOwnLinkError{ @@ -82,7 +82,7 @@ func Check2_3(cfg old.Config, fsMap map[string]string) error { pathString := path.Join("/", fsMap[dir.Filesystem], dir.Path) name := fmt.Sprintf("Directory: %s", pathString) if duplicate, isDup := entryMap[pathString]; isDup { - return util.DuplicateInodeError{duplicate, name} + return util.DuplicateInodeError{Old: duplicate, New: name} } if l := util.CheckPathUsesLink(links, pathString); l != "" { return &util.UsesOwnLinkError{ @@ -96,7 +96,7 @@ func Check2_3(cfg old.Config, fsMap map[string]string) error { pathString := path.Join("/", fsMap[link.Filesystem], link.Path) name := fmt.Sprintf("Link: %s", pathString) if duplicate, isDup := entryMap[pathString]; isDup { - return &util.DuplicateInodeError{duplicate, name} + return &util.DuplicateInodeError{Old: duplicate, New: name} } entryMap[pathString] = name if l := util.CheckPathUsesLink(links, pathString); l != "" { @@ -111,14 +111,14 @@ func Check2_3(cfg old.Config, fsMap map[string]string) error { unitMap := map[string]struct{}{} // unit name -> struct{} for _, unit := range cfg.Systemd.Units { if _, isDup := unitMap[unit.Name]; isDup { - return util.DuplicateUnitError{unit.Name} + return util.DuplicateUnitError{Name: unit.Name} } unitMap[unit.Name] = struct{}{} dropinMap := map[string]struct{}{} // dropin name -> struct{} for _, dropin := range unit.Dropins { if _, isDup := dropinMap[dropin.Name]; isDup { - return util.DuplicateDropinError{unit.Name, dropin.Name} + return util.DuplicateDropinError{Unit: unit.Name, Name: dropin.Name} } dropinMap[dropin.Name] = struct{}{} } @@ -397,7 +397,7 @@ func translateFiles(files []old.File, m map[string]string) (ret []types.File) { // In spec 3, overwrite must be false if append is true // i.e. 
spec 2 files with append true must be translated to spec 3 files with overwrite false - if f.FileEmbedded1.Append == true { + if f.FileEmbedded1.Append { f.Node.Overwrite = util.BoolPStrict(false) } @@ -448,9 +448,9 @@ func translateDirectories(dirs []old.Directory, m map[string]string) (ret []type return } -// RemoveDuplicateFilesAndUnits is a helper function that removes duplicated files/units from -// spec v2 config, since neither spec v3 nor the translator function allow for duplicate file -// entries in the config. +// RemoveDuplicateFilesUnitsUsers is a helper function that removes duplicated files/units/users +// from spec v2 config, since neither spec v3 nor the translator function allow for duplicate +// file entries in the config. // This functionality is not included in the Translate function and has some limitations, but // may be useful in cases where configuration has to be sanitized before translation. // For duplicates, it takes ordering into consideration by taking the file/unit contents from @@ -459,9 +459,10 @@ func translateDirectories(dirs []old.Directory, m map[string]string) (ret []type // to the list of dropins of the deduplicated unit definition. // The function will fail if a non-root filesystem is declared on any file. // It will also fail if file appendices are encountered. -func RemoveDuplicateFilesAndUnits(cfg old.Config) (old.Config, error) { +func RemoveDuplicateFilesUnitsUsers(cfg old.Config) (old.Config, error) { files := cfg.Storage.Files units := cfg.Systemd.Units + users := cfg.Passwd.Users filePathMap := map[string]bool{} var outFiles []old.File @@ -470,7 +471,7 @@ func RemoveDuplicateFilesAndUnits(cfg old.Config) (old.Config, error) { if files[i].Filesystem != "root" { return old.Config{}, errors.New("cannot dedupe set of files on non-root filesystem") } - if files[i].Append == true { + if files[i].Append { return old.Config{}, errors.New("cannot dedupe set of files that contains appendices") } path := files[i].Path @@ -518,9 +519,32 @@ func RemoveDuplicateFilesAndUnits(cfg old.Config) (old.Config, error) { } } - // outFiles and outUnits should now have all duplication removed + // Concat sshkey sections into the newest passwdUser in the list + // Only the SSHAuthorizedKeys of a duplicate user are considered, + // all other fields are ignored. + userNameMap := map[string]bool{} + var outUsers []old.PasswdUser + // range from highest to lowest index + for i := len(users) - 1; i >= 0; i-- { + userName := users[i].Name + if _, isDup := userNameMap[userName]; isDup { + // this is a duplicated user by name, append keys to existing user + for j := range outUsers { + if outUsers[j].Name == userName { + outUsers[j].SSHAuthorizedKeys = append(outUsers[j].SSHAuthorizedKeys, users[i].SSHAuthorizedKeys...) 
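+					// Iteration runs newest-to-oldest, so the surviving
+					// (newest) user's keys come first and the keys of older
+					// duplicates are appended after them.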
+ } + } + } else { + // append unique users + outUsers = append(outUsers, users[i]) + userNameMap[userName] = true + } + } + + // outFiles, outUnits, and outUsers should now have all duplication removed cfg.Storage.Files = outFiles cfg.Systemd.Units = outUnits + cfg.Passwd.Users = outUsers return cfg, nil } diff --git a/vendor/github.com/coreos/ign-converter/translate/v32tov31/v32tov31.go b/vendor/github.com/coreos/ign-converter/translate/v32tov31/v32tov31.go index f906748b0..13c835485 100644 --- a/vendor/github.com/coreos/ign-converter/translate/v32tov31/v32tov31.go +++ b/vendor/github.com/coreos/ign-converter/translate/v32tov31/v32tov31.go @@ -95,6 +95,7 @@ func translateConfig(old old_types.Config) (ret types.Config) { tr.Translate(&old, &ret) return } + // end copied Ignition v3_2/translate block // Translate translates Ignition spec config v3.2 to spec v3.1 @@ -133,7 +134,7 @@ func Translate(cfg old_types.Config) (types.Config, error) { res := translateConfig(cfg) // Sanity check the returned config - oldrpt := validate.ValidateWithContext(cfg, nil) + oldrpt := validate.ValidateWithContext(res, nil) if oldrpt.IsFatal() { return types.Config{}, fmt.Errorf("Converted spec has unexpected fatal error:\n%s", oldrpt.String()) } diff --git a/vendor/github.com/coreos/ign-converter/translate/v33tov32/v33tov32.go b/vendor/github.com/coreos/ign-converter/translate/v33tov32/v33tov32.go new file mode 100644 index 000000000..9a3a63b50 --- /dev/null +++ b/vendor/github.com/coreos/ign-converter/translate/v33tov32/v33tov32.go @@ -0,0 +1,133 @@ +// Copyright 2021 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v33tov32 + +import ( + "fmt" + "reflect" + + "github.com/coreos/ignition/v2/config/translate" + "github.com/coreos/ignition/v2/config/v3_2/types" + old_types "github.com/coreos/ignition/v2/config/v3_3/types" + "github.com/coreos/ignition/v2/config/validate" +) + +// Mostly a copy of github.com/coreos/ignition/v2/config/v3_3/translate/translate.go +// with the types & old_types imports reversed (the referenced file translates +// from 3.2 -> 3.3 but as a result only touches fields that are understood by +// the 3.2 spec). With additional logic to account for translation from a non-pointer +// field to a pointer field (e.g. ClevisCustom and Clevis), and the translation +// from a pointer to a non-pointer field (e.g. Link.Target, Raid.Level). 
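A hedged usage sketch for the new downconverter (Translate and the 3.3 Parse entry point both exist in this patch; the wrapper function, its input, and its error handling are illustrative):

package main

import (
	"fmt"

	"github.com/coreos/ign-converter/translate/v33tov32"
	v33 "github.com/coreos/ignition/v2/config/v3_3"
)

// downconvert parses a spec 3.3 Ignition config and renders it as 3.2,
// failing on 3.3-only features such as kernelArguments.
func downconvert(raw []byte) error {
	cfg, _, err := v33.Parse(raw)
	if err != nil {
		return err
	}
	out, err := v33tov32.Translate(cfg)
	if err != nil {
		return err // e.g. "KernelArguments is not supported on 3.2"
	}
	fmt.Println(out.Ignition.Version) // "3.2.0"
	return nil
}

func main() {
	_ = downconvert([]byte(`{"ignition":{"version":"3.3.0"}}`))
}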
+func translateIgnition(old old_types.Ignition) (ret types.Ignition) { + // use a new translator so we don't recurse infinitely + translate.NewTranslator().Translate(&old, &ret) + ret.Version = types.MaxVersion.String() + return +} + +func translateRaid(old old_types.Raid) (ret types.Raid) { + tr := translate.NewTranslator() + tr.Translate(&old.Devices, &ret.Devices) + tr.Translate(old.Level, &ret.Level) + tr.Translate(&old.Name, &ret.Name) + tr.Translate(&old.Options, &ret.Options) + tr.Translate(&old.Spares, &ret.Spares) + return +} + +func translateLuks(old old_types.Luks) (ret types.Luks) { + tr := translate.NewTranslator() + tr.AddCustomTranslator(translateClevis) + // this goes from "not pointer" in 3.3 to "pointer" in 3.2 so we need to + // populate it if old.Clevis isn't empty + if !reflect.DeepEqual(old.Clevis, old_types.Clevis{}) { + ret.Clevis = &types.Clevis{} + tr.Translate(&old.Clevis, ret.Clevis) + } + tr.Translate(&old.Device, &ret.Device) + tr.Translate(&old.KeyFile, &ret.KeyFile) + tr.Translate(&old.Label, &ret.Label) + tr.Translate(&old.Name, &ret.Name) + tr.Translate(&old.Options, &ret.Options) + tr.Translate(&old.UUID, &ret.UUID) + tr.Translate(&old.WipeVolume, &ret.WipeVolume) + return +} + +func translateClevis(old old_types.Clevis) (ret types.Clevis) { + tr := translate.NewTranslator() + tr.AddCustomTranslator(translateClevisCustom) + // this goes from "not pointer" in 3.3 to "pointer" in 3.2 so we need to + // populate it if old.Custom isn't empty + if !reflect.DeepEqual(old.Custom, old_types.ClevisCustom{}) { + ret.Custom = &types.Custom{} + tr.Translate(&old.Custom, ret.Custom) + } + tr.Translate(&old.Tang, &ret.Tang) + tr.Translate(&old.Threshold, &ret.Threshold) + tr.Translate(&old.Tpm2, &ret.Tpm2) + return +} + +func translateClevisCustom(old old_types.ClevisCustom) (ret types.Custom) { + tr := translate.NewTranslator() + tr.Translate(old.Config, &ret.Config) + tr.Translate(&old.NeedsNetwork, &ret.NeedsNetwork) + tr.Translate(old.Pin, &ret.Pin) + return +} + +func translateLinkEmbedded1(old old_types.LinkEmbedded1) (ret types.LinkEmbedded1) { + tr := translate.NewTranslator() + tr.Translate(&old.Hard, &ret.Hard) + tr.Translate(old.Target, &ret.Target) + return +} + +func translateConfig(old old_types.Config) (ret types.Config) { + tr := translate.NewTranslator() + tr.AddCustomTranslator(translateIgnition) + tr.AddCustomTranslator(translateRaid) + tr.AddCustomTranslator(translateLuks) + tr.AddCustomTranslator(translateLinkEmbedded1) + tr.Translate(&old.Ignition, &ret.Ignition) + tr.Translate(&old.Passwd, &ret.Passwd) + tr.Translate(&old.Storage, &ret.Storage) + tr.Translate(&old.Systemd, &ret.Systemd) + return +} + +// end copied Ignition v3_3/translate block + +// Translate translates Ignition spec config v3.3 to spec v3.2 +func Translate(cfg old_types.Config) (types.Config, error) { + rpt := validate.ValidateWithContext(cfg, nil) + if rpt.IsFatal() { + return types.Config{}, fmt.Errorf("Invalid input config:\n%s", rpt.String()) + } + + if len(cfg.KernelArguments.ShouldExist) > 0 || len(cfg.KernelArguments.ShouldNotExist) > 0 { + return types.Config{}, fmt.Errorf("KernelArguments is not supported on 3.2") + } + + res := translateConfig(cfg) + + // Sanity check the returned config + oldrpt := validate.ValidateWithContext(res, nil) + if oldrpt.IsFatal() { + return types.Config{}, fmt.Errorf("Converted spec has unexpected fatal error:\n%s", oldrpt.String()) + } + return res, nil +} diff --git a/vendor/github.com/coreos/ign-converter/translate/v34tov33/v34tov33.go 
b/vendor/github.com/coreos/ign-converter/translate/v34tov33/v34tov33.go new file mode 100644 index 000000000..3b0a378f7 --- /dev/null +++ b/vendor/github.com/coreos/ign-converter/translate/v34tov33/v34tov33.go @@ -0,0 +1,192 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v34tov33 + +import ( + "fmt" + "net/url" + "reflect" + + "github.com/coreos/ignition/v2/config/translate" + "github.com/coreos/ignition/v2/config/util" + "github.com/coreos/ignition/v2/config/v3_3/types" + old_types "github.com/coreos/ignition/v2/config/v3_4/types" + "github.com/coreos/ignition/v2/config/validate" +) + +// Copy of github.com/coreos/ignition/v2/config/v3_4/translate/translate.go +// with the types & old_types imports reversed (the referenced file translates +// from 3.3 -> 3.4 but as a result only touches fields that are understood by +// the 3.3 spec). +func translateIgnition(old old_types.Ignition) (ret types.Ignition) { + // use a new translator so we don't recurse infinitely + translate.NewTranslator().Translate(&old, &ret) + ret.Version = types.MaxVersion.String() + return +} + +func translateFileEmbedded1(old old_types.FileEmbedded1) (ret types.FileEmbedded1) { + tr := translate.NewTranslator() + tr.Translate(&old.Append, &ret.Append) + tr.Translate(&old.Contents, &ret.Contents) + if old.Mode != nil { + // We support the special mode bits for specs >=3.4.0, so if + // the user provides special mode bits in an Ignition config + // with the version < 3.4.0, then we need to explicitly mask + // those bits out during translation. + ret.Mode = util.IntToPtr(*old.Mode & ^07000) + } + return +} + +func translateDirectoryEmbedded1(old old_types.DirectoryEmbedded1) (ret types.DirectoryEmbedded1) { + if old.Mode != nil { + // We support the special mode bits for specs >=3.4.0, so if + // the user provides special mode bits in an Ignition config + // with the version < 3.4.0, then we need to explicitly mask + // those bits out during translation. 
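+		// The &^ arithmetic drops exactly those bits, e.g.
+		// 0o7755 (rwsr-sr-t) & ^0o7000 == 0o755 (rwxr-xr-x).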
+ ret.Mode = util.IntToPtr(*old.Mode & ^07000) + } + return +} + +func translateLuks(old old_types.Luks) (ret types.Luks) { + tr := translate.NewTranslator() + tr.AddCustomTranslator(translateTang) + tr.Translate(&old.Clevis, &ret.Clevis) + tr.Translate(&old.Device, &ret.Device) + tr.Translate(&old.KeyFile, &ret.KeyFile) + tr.Translate(&old.Label, &ret.Label) + tr.Translate(&old.Name, &ret.Name) + tr.Translate(&old.Options, &ret.Options) + tr.Translate(&old.UUID, &ret.UUID) + tr.Translate(&old.WipeVolume, &ret.WipeVolume) + return +} + +func translateTang(old old_types.Tang) (ret types.Tang) { + tr := translate.NewTranslator() + tr.Translate(&old.Thumbprint, &ret.Thumbprint) + tr.Translate(&old.URL, &ret.URL) + return +} + +func translateConfig(old old_types.Config) (ret types.Config) { + tr := translate.NewTranslator() + tr.AddCustomTranslator(translateIgnition) + tr.AddCustomTranslator(translateDirectoryEmbedded1) + tr.AddCustomTranslator(translateFileEmbedded1) + tr.AddCustomTranslator(translateLuks) + tr.Translate(&old, &ret) + return +} + +// end copied Ignition v3_4/translate block + +// Translate translates Ignition spec config v3.4 to spec v3.3 +func Translate(cfg old_types.Config) (types.Config, error) { + rpt := validate.ValidateWithContext(cfg, nil) + if rpt.IsFatal() { + return types.Config{}, fmt.Errorf("Invalid input config:\n%s", rpt.String()) + } + + err := checkValue(reflect.ValueOf(cfg)) + if err != nil { + return types.Config{}, err + } + + res := translateConfig(cfg) + + // Sanity check the returned config + oldrpt := validate.ValidateWithContext(res, nil) + if oldrpt.IsFatal() { + return types.Config{}, fmt.Errorf("Converted spec has unexpected fatal error:\n%s", oldrpt.String()) + } + return res, nil +} + +func checkValue(v reflect.Value) error { + switch v.Type() { + case reflect.TypeOf(old_types.Tang{}): + tang := v.Interface().(old_types.Tang) + // 3.3 does not support tang offline provisioning + if util.NotEmpty(tang.Advertisement) { + return fmt.Errorf("Invalid input config: tang offline provisioning is not supported in spec v3.3") + } + case reflect.TypeOf(old_types.Luks{}): + luks := v.Interface().(old_types.Luks) + // 3.3 does not support luks discard + if util.IsTrue(luks.Discard) { + return fmt.Errorf("Invalid input config: luks discard is not supported in spec v3.3") + } + // 3.3 does not support luks openOptions + if len(luks.OpenOptions) > 0 { + return fmt.Errorf("Invalid input config: luks openOptions is not supported in spec v3.3") + } + case reflect.TypeOf(old_types.FileEmbedded1{}): + f := v.Interface().(old_types.FileEmbedded1) + // 3.3 does not support special mode bits in files + if f.Mode != nil && (*f.Mode&07000) != 0 { + return fmt.Errorf("Invalid input config: special mode bits are not supported in spec v3.3") + } + case reflect.TypeOf(old_types.DirectoryEmbedded1{}): + d := v.Interface().(old_types.DirectoryEmbedded1) + // 3.3 does not support special mode bits in directories + if d.Mode != nil && (*d.Mode&07000) != 0 { + return fmt.Errorf("Invalid input config: special mode bits are not supported in spec v3.3") + } + case reflect.TypeOf(old_types.Resource{}): + resource := v.Interface().(old_types.Resource) + // 3.3 does not support arn: scheme for s3 + if util.NotEmpty(resource.Source) { + u, err := url.Parse(*resource.Source) + if err != nil { + return fmt.Errorf("Invalid input config: %v", err) + } + if u.Scheme == "arn" { + return fmt.Errorf("Invalid input config: arn: scheme for s3 is not supported in spec v3.3") + } + } + } + return 
descend(v) +} + +func descend(v reflect.Value) error { + k := v.Type().Kind() + switch { + case util.IsPrimitive(k): + return nil + case k == reflect.Struct: + for i := 0; i < v.NumField(); i += 1 { + err := checkValue(v.Field(i)) + if err != nil { + return err + } + } + case k == reflect.Slice: + for i := 0; i < v.Len(); i += 1 { + err := checkValue(v.Index(i)) + if err != nil { + return err + } + } + case k == reflect.Ptr: + v = v.Elem() + if v.IsValid() { + return checkValue(v) + } + } + return nil +} diff --git a/vendor/github.com/coreos/ignition/v2/config/shared/errors/errors.go b/vendor/github.com/coreos/ignition/v2/config/shared/errors/errors.go index 492fd7e6c..8e2d24d5e 100644 --- a/vendor/github.com/coreos/ignition/v2/config/shared/errors/errors.go +++ b/vendor/github.com/coreos/ignition/v2/config/shared/errors/errors.go @@ -39,6 +39,7 @@ var ( ErrLinkUsedSymlink = errors.New("link path includes link in config") ErrLinkTargetRequired = errors.New("link target is required") ErrHardLinkToDirectory = errors.New("hard link target is a directory") + ErrHardLinkSpecifiesOwner = errors.New("user/group ignored for hard link") ErrDiskDeviceRequired = errors.New("disk device is required") ErrPartitionNumbersCollide = errors.New("partition numbers collide") ErrPartitionsOverlap = errors.New("partitions overlap") @@ -61,6 +62,7 @@ var ( ErrClevisConfigRequired = errors.New("missing required custom clevis config") ErrClevisCustomWithOthers = errors.New("cannot use custom clevis config with tpm2, tang, or threshold") ErrTangThumbprintRequired = errors.New("thumbprint is required") + ErrInvalidTangAdvertisement = errors.New("advertisement is not valid JSON") ErrFileIllegalMode = errors.New("illegal file mode") ErrModeSpecialBits = errors.New("setuid/setgid/sticky bits are not supported in spec versions older than 3.4.0") ErrBothIDAndNameSet = errors.New("cannot set both id and name") @@ -70,6 +72,8 @@ var ( ErrNoPath = errors.New("path not specified") ErrPathRelative = errors.New("path not absolute") ErrDirtyPath = errors.New("path is not fully simplified") + ErrPartitionsOverwritten = errors.New("filesystem overwrites partitioned device") + ErrFilesystemImplicitWipe = errors.New("device matches disk with wipeTable enabled; filesystem will be wiped") ErrRaidLevelRequired = errors.New("raid level is required") ErrSparesUnsupportedForLevel = errors.New("spares unsupported for linear and raid0 arrays") ErrUnrecognizedRaidLevel = errors.New("unrecognized raid level") @@ -80,6 +84,7 @@ var ( ErrDuplicateLabels = errors.New("cannot use the same partition label twice") ErrInvalidProxy = errors.New("proxies must be http(s)") ErrInsecureProxy = errors.New("insecure plaintext HTTP proxy specified for HTTPS resources") + ErrPathConflictsSystemd = errors.New("path conflicts with systemd unit or dropin") // Systemd section errors ErrInvalidSystemdExt = errors.New("invalid systemd unit extension") @@ -113,3 +118,9 @@ var ( func NewNoInstallSectionError(name string) error { return fmt.Errorf("unit %q is enabled, but has no install section so enable does nothing", name) } + +// NewNoInstallSectionForInstantiableUnitError produces an error indicating the +// given instantiable unit for an instance unit is missing an Install section. 
+func NewNoInstallSectionForInstantiableUnitError(instantiable, instance string) error { + return fmt.Errorf("template unit %q for %q doesn't have Install section", instantiable, instance) +} diff --git a/vendor/github.com/coreos/ignition/v2/config/shared/parse/unit.go b/vendor/github.com/coreos/ignition/v2/config/shared/parse/unit.go new file mode 100644 index 000000000..7dcd18910 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/shared/parse/unit.go @@ -0,0 +1,37 @@ +// Copyright 2022 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package parse contains a function for parsing unit contents shared between +// multiple config versions. +package parse + +import ( + "fmt" + "strings" + + "github.com/coreos/go-systemd/v22/unit" +) + +// ParseUnitContents parses the content of a given unit +func ParseUnitContents(content *string) ([]*unit.UnitOption, error) { + if content == nil { + return []*unit.UnitOption{}, nil + } + c := strings.NewReader(*content) + opts, err := unit.Deserialize(c) + if err != nil { + return nil, fmt.Errorf("invalid unit content: %s", err) + } + return opts, nil +} diff --git a/vendor/github.com/coreos/ignition/v2/config/util/config.go b/vendor/github.com/coreos/ignition/v2/config/util/config.go index 85cd7fa7c..0b6766985 100644 --- a/vendor/github.com/coreos/ignition/v2/config/util/config.go +++ b/vendor/github.com/coreos/ignition/v2/config/util/config.go @@ -11,6 +11,7 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
+ package util import ( diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_0/types/config.go b/vendor/github.com/coreos/ignition/v2/config/v3_0/types/config.go index a98062a1b..1ac295948 100644 --- a/vendor/github.com/coreos/ignition/v2/config/v3_0/types/config.go +++ b/vendor/github.com/coreos/ignition/v2/config/v3_0/types/config.go @@ -15,7 +15,12 @@ package types import ( + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/util" + "github.com/coreos/go-semver/semver" + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" ) var ( @@ -24,3 +29,36 @@ var ( Minor: 0, } ) + +func (cfg Config) Validate(c path.ContextPath) (r report.Report) { + systemdPath := "/etc/systemd/system/" + unitPaths := map[string]struct{}{} + for _, unit := range cfg.Systemd.Units { + if !util.NilOrEmpty(unit.Contents) { + pathString := systemdPath + unit.Name + unitPaths[pathString] = struct{}{} + } + for _, dropin := range unit.Dropins { + if !util.NilOrEmpty(dropin.Contents) { + pathString := systemdPath + unit.Name + ".d/" + dropin.Name + unitPaths[pathString] = struct{}{} + } + } + } + for i, f := range cfg.Storage.Files { + if _, exists := unitPaths[f.Path]; exists { + r.AddOnError(c.Append("storage", "files", i, "path"), errors.ErrPathConflictsSystemd) + } + } + for i, d := range cfg.Storage.Directories { + if _, exists := unitPaths[d.Path]; exists { + r.AddOnError(c.Append("storage", "directories", i, "path"), errors.ErrPathConflictsSystemd) + } + } + for i, l := range cfg.Storage.Links { + if _, exists := unitPaths[l.Path]; exists { + r.AddOnError(c.Append("storage", "links", i, "path"), errors.ErrPathConflictsSystemd) + } + } + return +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_0/types/storage.go b/vendor/github.com/coreos/ignition/v2/config/v3_0/types/storage.go index eac98e74b..24668954b 100644 --- a/vendor/github.com/coreos/ignition/v2/config/v3_0/types/storage.go +++ b/vendor/github.com/coreos/ignition/v2/config/v3_0/types/storage.go @@ -34,6 +34,14 @@ func (s Storage) MergedKeys() map[string]string { } func (s Storage) Validate(c vpath.ContextPath) (r report.Report) { + s.validateDirectories(c, &r) + s.validateFiles(c, &r) + s.validateLinks(c, &r) + s.validateFilesystems(c, &r) + return +} + +func (s Storage) validateDirectories(c vpath.ContextPath, r *report.Report) { for i, d := range s.Directories { for _, l := range s.Links { if strings.HasPrefix(d.Path, l.Path+"/") { @@ -41,6 +49,9 @@ func (s Storage) Validate(c vpath.ContextPath) (r report.Report) { } } } +} + +func (s Storage) validateFiles(c vpath.ContextPath, r *report.Report) { for i, f := range s.Files { for _, l := range s.Links { if strings.HasPrefix(f.Path, l.Path+"/") { @@ -48,6 +59,9 @@ func (s Storage) Validate(c vpath.ContextPath) (r report.Report) { } } } +} + +func (s Storage) validateLinks(c vpath.ContextPath, r *report.Report) { for i, l1 := range s.Links { for _, l2 := range s.Links { if strings.HasPrefix(l1.Path, l2.Path+"/") { @@ -66,6 +80,32 @@ func (s Storage) Validate(c vpath.ContextPath) (r report.Report) { r.AddOnError(c.Append("links", i), errors.ErrHardLinkToDirectory) } } + ownerCheck := func(ok bool, path vpath.ContextPath) { + if !ok { + r.AddOnWarn(path, errors.ErrHardLinkSpecifiesOwner) + } + } + ownerCheck(l1.User.ID == nil, c.Append("links", i, "user", "id")) + ownerCheck(l1.User.Name == nil, c.Append("links", i, "user", "name")) + ownerCheck(l1.Group.ID == nil, c.Append("links", i, "group", "id")) + ownerCheck(l1.Group.Name 
== nil, c.Append("links", i, "group", "name")) + } +} + +func (s Storage) validateFilesystems(c vpath.ContextPath, r *report.Report) { + disks := make(map[string]Disk) + for _, d := range s.Disks { + disks[d.Device] = d + } + + for i, f := range s.Filesystems { + disk, exist := disks[f.Device] + if exist { + if len(disk.Partitions) > 0 { + r.AddOnWarn(c.Append("filesystems", i, "device"), errors.ErrPartitionsOverwritten) + } else if !util.IsTrue(f.WipeFilesystem) && util.IsTrue(disk.WipeTable) { + r.AddOnWarn(c.Append("filesystems", i, "device"), errors.ErrFilesystemImplicitWipe) + } + } } - return } diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_0/types/systemd.go b/vendor/github.com/coreos/ignition/v2/config/v3_0/types/systemd.go new file mode 100644 index 000000000..ac521ba73 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_0/types/systemd.go @@ -0,0 +1,61 @@ +// Copyright 2022 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "regexp" + + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/shared/parse" + "github.com/coreos/ignition/v2/config/util" + + vpath "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (s Systemd) Validate(c vpath.ContextPath) (r report.Report) { + units := make(map[string]Unit) + checkInstanceUnit := regexp.MustCompile(`^(.+?)@(.+?)\.service$`) + for _, d := range s.Units { + units[d.Name] = d + } + for index, unit := range s.Units { + if checkInstanceUnit.MatchString(unit.Name) && util.IsTrue(unit.Enabled) { + instUnitSlice := checkInstanceUnit.FindSubmatch([]byte(unit.Name)) + instantiableUnit := string(instUnitSlice[1]) + "@.service" + if _, ok := units[instantiableUnit]; ok && util.NotEmpty(units[instantiableUnit].Contents) { + foundInstallSection := false + // we're doing a separate validation pass on each unit to identify + // if an instantiable unit has the install section. So logging an + // `AddOnError` will produce duplicate errors on bad unit contents + // because we're already doing that while validating a unit separately. 
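+				// Hence a parse failure is silently skipped here (continue)
+				// rather than reported a second time.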
+ opts, err := parse.ParseUnitContents(units[instantiableUnit].Contents) + if err != nil { + continue + } + for _, section := range opts { + if section.Section == "Install" { + foundInstallSection = true + break + } + } + if !foundInstallSection { + r.AddOnWarn(c.Append("units", index, "contents"), errors.NewNoInstallSectionForInstantiableUnitError(instantiableUnit, unit.Name)) + } + } + } + } + return +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_0/types/unit.go b/vendor/github.com/coreos/ignition/v2/config/v3_0/types/unit.go index e0e5c50a3..786debaee 100644 --- a/vendor/github.com/coreos/ignition/v2/config/v3_0/types/unit.go +++ b/vendor/github.com/coreos/ignition/v2/config/v3_0/types/unit.go @@ -15,15 +15,13 @@ package types import ( - "fmt" "path" - "strings" "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/shared/parse" "github.com/coreos/ignition/v2/config/shared/validations" "github.com/coreos/ignition/v2/config/util" - "github.com/coreos/go-systemd/v22/unit" cpath "github.com/coreos/vcontext/path" "github.com/coreos/vcontext/report" ) @@ -39,7 +37,7 @@ func (d Dropin) Key() string { func (u Unit) Validate(c cpath.ContextPath) (r report.Report) { r.AddOnError(c.Append("name"), validateName(u.Name)) c = c.Append("contents") - opts, err := validateUnitContent(u.Contents) + opts, err := parse.ParseUnitContents(u.Contents) r.AddOnError(c, err) r.AddOnWarn(c, validations.ValidateInstallSection(u.Name, util.IsTrue(u.Enabled), util.NilOrEmpty(u.Contents), opts)) @@ -57,7 +55,7 @@ func validateName(name string) error { } func (d Dropin) Validate(c cpath.ContextPath) (r report.Report) { - _, err := validateUnitContent(d.Contents) + _, err := parse.ParseUnitContents(d.Contents) r.AddOnError(c.Append("contents"), err) switch path.Ext(d.Name) { @@ -68,15 +66,3 @@ func (d Dropin) Validate(c cpath.ContextPath) (r report.Report) { return } - -func validateUnitContent(content *string) ([]*unit.UnitOption, error) { - if content == nil { - return []*unit.UnitOption{}, nil - } - c := strings.NewReader(*content) - opts, err := unit.Deserialize(c) - if err != nil { - return nil, fmt.Errorf("invalid unit content: %s", err) - } - return opts, nil -} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_1/types/config.go b/vendor/github.com/coreos/ignition/v2/config/v3_1/types/config.go index 23ba8dd18..3cebde7fb 100644 --- a/vendor/github.com/coreos/ignition/v2/config/v3_1/types/config.go +++ b/vendor/github.com/coreos/ignition/v2/config/v3_1/types/config.go @@ -15,7 +15,12 @@ package types import ( + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/util" + "github.com/coreos/go-semver/semver" + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" ) var ( @@ -24,3 +29,36 @@ var ( Minor: 1, } ) + +func (cfg Config) Validate(c path.ContextPath) (r report.Report) { + systemdPath := "/etc/systemd/system/" + unitPaths := map[string]struct{}{} + for _, unit := range cfg.Systemd.Units { + if !util.NilOrEmpty(unit.Contents) { + pathString := systemdPath + unit.Name + unitPaths[pathString] = struct{}{} + } + for _, dropin := range unit.Dropins { + if !util.NilOrEmpty(dropin.Contents) { + pathString := systemdPath + unit.Name + ".d/" + dropin.Name + unitPaths[pathString] = struct{}{} + } + } + } + for i, f := range cfg.Storage.Files { + if _, exists := unitPaths[f.Path]; exists { + r.AddOnError(c.Append("storage", "files", i, "path"), errors.ErrPathConflictsSystemd) + } + } + for i, d 
:= range cfg.Storage.Directories { + if _, exists := unitPaths[d.Path]; exists { + r.AddOnError(c.Append("storage", "directories", i, "path"), errors.ErrPathConflictsSystemd) + } + } + for i, l := range cfg.Storage.Links { + if _, exists := unitPaths[l.Path]; exists { + r.AddOnError(c.Append("storage", "links", i, "path"), errors.ErrPathConflictsSystemd) + } + } + return +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_1/types/storage.go b/vendor/github.com/coreos/ignition/v2/config/v3_1/types/storage.go index eac98e74b..24668954b 100644 --- a/vendor/github.com/coreos/ignition/v2/config/v3_1/types/storage.go +++ b/vendor/github.com/coreos/ignition/v2/config/v3_1/types/storage.go @@ -34,6 +34,14 @@ func (s Storage) MergedKeys() map[string]string { } func (s Storage) Validate(c vpath.ContextPath) (r report.Report) { + s.validateDirectories(c, &r) + s.validateFiles(c, &r) + s.validateLinks(c, &r) + s.validateFilesystems(c, &r) + return +} + +func (s Storage) validateDirectories(c vpath.ContextPath, r *report.Report) { for i, d := range s.Directories { for _, l := range s.Links { if strings.HasPrefix(d.Path, l.Path+"/") { @@ -41,6 +49,9 @@ func (s Storage) Validate(c vpath.ContextPath) (r report.Report) { } } } +} + +func (s Storage) validateFiles(c vpath.ContextPath, r *report.Report) { for i, f := range s.Files { for _, l := range s.Links { if strings.HasPrefix(f.Path, l.Path+"/") { @@ -48,6 +59,9 @@ func (s Storage) Validate(c vpath.ContextPath) (r report.Report) { } } } +} + +func (s Storage) validateLinks(c vpath.ContextPath, r *report.Report) { for i, l1 := range s.Links { for _, l2 := range s.Links { if strings.HasPrefix(l1.Path, l2.Path+"/") { @@ -66,6 +80,32 @@ func (s Storage) Validate(c vpath.ContextPath) (r report.Report) { r.AddOnError(c.Append("links", i), errors.ErrHardLinkToDirectory) } } + ownerCheck := func(ok bool, path vpath.ContextPath) { + if !ok { + r.AddOnWarn(path, errors.ErrHardLinkSpecifiesOwner) + } + } + ownerCheck(l1.User.ID == nil, c.Append("links", i, "user", "id")) + ownerCheck(l1.User.Name == nil, c.Append("links", i, "user", "name")) + ownerCheck(l1.Group.ID == nil, c.Append("links", i, "group", "id")) + ownerCheck(l1.Group.Name == nil, c.Append("links", i, "group", "name")) + } +} + +func (s Storage) validateFilesystems(c vpath.ContextPath, r *report.Report) { + disks := make(map[string]Disk) + for _, d := range s.Disks { + disks[d.Device] = d + } + + for i, f := range s.Filesystems { + disk, exist := disks[f.Device] + if exist { + if len(disk.Partitions) > 0 { + r.AddOnWarn(c.Append("filesystems", i, "device"), errors.ErrPartitionsOverwritten) + } else if !util.IsTrue(f.WipeFilesystem) && util.IsTrue(disk.WipeTable) { + r.AddOnWarn(c.Append("filesystems", i, "device"), errors.ErrFilesystemImplicitWipe) + } + } } - return } diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_1/types/systemd.go b/vendor/github.com/coreos/ignition/v2/config/v3_1/types/systemd.go new file mode 100644 index 000000000..ac521ba73 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_1/types/systemd.go @@ -0,0 +1,61 @@ +// Copyright 2022 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "regexp" + + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/shared/parse" + "github.com/coreos/ignition/v2/config/util" + + vpath "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (s Systemd) Validate(c vpath.ContextPath) (r report.Report) { + units := make(map[string]Unit) + checkInstanceUnit := regexp.MustCompile(`^(.+?)@(.+?)\.service$`) + for _, d := range s.Units { + units[d.Name] = d + } + for index, unit := range s.Units { + if checkInstanceUnit.MatchString(unit.Name) && util.IsTrue(unit.Enabled) { + instUnitSlice := checkInstanceUnit.FindSubmatch([]byte(unit.Name)) + instantiableUnit := string(instUnitSlice[1]) + "@.service" + if _, ok := units[instantiableUnit]; ok && util.NotEmpty(units[instantiableUnit].Contents) { + foundInstallSection := false + // we're doing a separate validation pass on each unit to identify + // if an instantiable unit has the install section. So logging an + // `AddOnError` will produce duplicate errors on bad unit contents + // because we're already doing that while validating a unit separately. + opts, err := parse.ParseUnitContents(units[instantiableUnit].Contents) + if err != nil { + continue + } + for _, section := range opts { + if section.Section == "Install" { + foundInstallSection = true + break + } + } + if !foundInstallSection { + r.AddOnWarn(c.Append("units", index, "contents"), errors.NewNoInstallSectionForInstantiableUnitError(instantiableUnit, unit.Name)) + } + } + } + } + return +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_1/types/unit.go b/vendor/github.com/coreos/ignition/v2/config/v3_1/types/unit.go index e0e5c50a3..786debaee 100644 --- a/vendor/github.com/coreos/ignition/v2/config/v3_1/types/unit.go +++ b/vendor/github.com/coreos/ignition/v2/config/v3_1/types/unit.go @@ -15,15 +15,13 @@ package types import ( - "fmt" "path" - "strings" "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/shared/parse" "github.com/coreos/ignition/v2/config/shared/validations" "github.com/coreos/ignition/v2/config/util" - "github.com/coreos/go-systemd/v22/unit" cpath "github.com/coreos/vcontext/path" "github.com/coreos/vcontext/report" ) @@ -39,7 +37,7 @@ func (d Dropin) Key() string { func (u Unit) Validate(c cpath.ContextPath) (r report.Report) { r.AddOnError(c.Append("name"), validateName(u.Name)) c = c.Append("contents") - opts, err := validateUnitContent(u.Contents) + opts, err := parse.ParseUnitContents(u.Contents) r.AddOnError(c, err) r.AddOnWarn(c, validations.ValidateInstallSection(u.Name, util.IsTrue(u.Enabled), util.NilOrEmpty(u.Contents), opts)) @@ -57,7 +55,7 @@ func validateName(name string) error { } func (d Dropin) Validate(c cpath.ContextPath) (r report.Report) { - _, err := validateUnitContent(d.Contents) + _, err := parse.ParseUnitContents(d.Contents) r.AddOnError(c.Append("contents"), err) switch path.Ext(d.Name) { @@ -68,15 +66,3 @@ func (d Dropin) Validate(c cpath.ContextPath) (r report.Report) { return } - -func validateUnitContent(content *string) 
([]*unit.UnitOption, error) { - if content == nil { - return []*unit.UnitOption{}, nil - } - c := strings.NewReader(*content) - opts, err := unit.Deserialize(c) - if err != nil { - return nil, fmt.Errorf("invalid unit content: %s", err) - } - return opts, nil -} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_2/types/config.go b/vendor/github.com/coreos/ignition/v2/config/v3_2/types/config.go index 4b18d5378..0e2fc3703 100644 --- a/vendor/github.com/coreos/ignition/v2/config/v3_2/types/config.go +++ b/vendor/github.com/coreos/ignition/v2/config/v3_2/types/config.go @@ -15,7 +15,12 @@ package types import ( + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/util" + "github.com/coreos/go-semver/semver" + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" ) var ( @@ -24,3 +29,36 @@ var ( Minor: 2, } ) + +func (cfg Config) Validate(c path.ContextPath) (r report.Report) { + systemdPath := "/etc/systemd/system/" + unitPaths := map[string]struct{}{} + for _, unit := range cfg.Systemd.Units { + if !util.NilOrEmpty(unit.Contents) { + pathString := systemdPath + unit.Name + unitPaths[pathString] = struct{}{} + } + for _, dropin := range unit.Dropins { + if !util.NilOrEmpty(dropin.Contents) { + pathString := systemdPath + unit.Name + ".d/" + dropin.Name + unitPaths[pathString] = struct{}{} + } + } + } + for i, f := range cfg.Storage.Files { + if _, exists := unitPaths[f.Path]; exists { + r.AddOnError(c.Append("storage", "files", i, "path"), errors.ErrPathConflictsSystemd) + } + } + for i, d := range cfg.Storage.Directories { + if _, exists := unitPaths[d.Path]; exists { + r.AddOnError(c.Append("storage", "directories", i, "path"), errors.ErrPathConflictsSystemd) + } + } + for i, l := range cfg.Storage.Links { + if _, exists := unitPaths[l.Path]; exists { + r.AddOnError(c.Append("storage", "links", i, "path"), errors.ErrPathConflictsSystemd) + } + } + return +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_2/types/storage.go b/vendor/github.com/coreos/ignition/v2/config/v3_2/types/storage.go index fd1b8cecf..5cec008de 100644 --- a/vendor/github.com/coreos/ignition/v2/config/v3_2/types/storage.go +++ b/vendor/github.com/coreos/ignition/v2/config/v3_2/types/storage.go @@ -34,6 +34,14 @@ func (s Storage) MergedKeys() map[string]string { } func (s Storage) Validate(c vpath.ContextPath) (r report.Report) { + s.validateDirectories(c, &r) + s.validateFiles(c, &r) + s.validateLinks(c, &r) + s.validateFilesystems(c, &r) + return +} + +func (s Storage) validateDirectories(c vpath.ContextPath, r *report.Report) { for i, d := range s.Directories { for _, l := range s.Links { if strings.HasPrefix(d.Path, l.Path+"/") { @@ -41,6 +49,9 @@ func (s Storage) Validate(c vpath.ContextPath) (r report.Report) { } } } +} + +func (s Storage) validateFiles(c vpath.ContextPath, r *report.Report) { for i, f := range s.Files { for _, l := range s.Links { if strings.HasPrefix(f.Path, l.Path+"/") { @@ -48,6 +59,9 @@ func (s Storage) Validate(c vpath.ContextPath) (r report.Report) { } } } +} + +func (s Storage) validateLinks(c vpath.ContextPath, r *report.Report) { for i, l1 := range s.Links { for _, l2 := range s.Links { if strings.HasPrefix(l1.Path, l2.Path+"/") { @@ -66,6 +80,32 @@ func (s Storage) Validate(c vpath.ContextPath) (r report.Report) { r.AddOnError(c.Append("links", i), errors.ErrHardLinkToDirectory) } } + ownerCheck := func(ok bool, path vpath.ContextPath) { + if !ok { + r.AddOnWarn(path, errors.ErrHardLinkSpecifiesOwner) + } + 
} + ownerCheck(l1.User.ID == nil, c.Append("links", i, "user", "id")) + ownerCheck(l1.User.Name == nil, c.Append("links", i, "user", "name")) + ownerCheck(l1.Group.ID == nil, c.Append("links", i, "group", "id")) + ownerCheck(l1.Group.Name == nil, c.Append("links", i, "group", "name")) + } +} + +func (s Storage) validateFilesystems(c vpath.ContextPath, r *report.Report) { + disks := make(map[string]Disk) + for _, d := range s.Disks { + disks[d.Device] = d + } + + for i, f := range s.Filesystems { + disk, exist := disks[f.Device] + if exist { + if len(disk.Partitions) > 0 { + r.AddOnWarn(c.Append("filesystems", i, "device"), errors.ErrPartitionsOverwritten) + } else if !util.IsTrue(f.WipeFilesystem) && util.IsTrue(disk.WipeTable) { + r.AddOnWarn(c.Append("filesystems", i, "device"), errors.ErrFilesystemImplicitWipe) + } + } } - return } diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_2/types/systemd.go b/vendor/github.com/coreos/ignition/v2/config/v3_2/types/systemd.go new file mode 100644 index 000000000..ac521ba73 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_2/types/systemd.go @@ -0,0 +1,61 @@ +// Copyright 2022 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "regexp" + + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/shared/parse" + "github.com/coreos/ignition/v2/config/util" + + vpath "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (s Systemd) Validate(c vpath.ContextPath) (r report.Report) { + units := make(map[string]Unit) + checkInstanceUnit := regexp.MustCompile(`^(.+?)@(.+?)\.service$`) + for _, d := range s.Units { + units[d.Name] = d + } + for index, unit := range s.Units { + if checkInstanceUnit.MatchString(unit.Name) && util.IsTrue(unit.Enabled) { + instUnitSlice := checkInstanceUnit.FindSubmatch([]byte(unit.Name)) + instantiableUnit := string(instUnitSlice[1]) + "@.service" + if _, ok := units[instantiableUnit]; ok && util.NotEmpty(units[instantiableUnit].Contents) { + foundInstallSection := false + // we're doing a separate validation pass on each unit to identify + // if an instantiable unit has the install section. So logging an + // `AddOnError` will produce duplicate errors on bad unit contents + // because we're already doing that while validating a unit separately. 
+ opts, err := parse.ParseUnitContents(units[instantiableUnit].Contents) + if err != nil { + continue + } + for _, section := range opts { + if section.Section == "Install" { + foundInstallSection = true + break + } + } + if !foundInstallSection { + r.AddOnWarn(c.Append("units", index, "contents"), errors.NewNoInstallSectionForInstantiableUnitError(instantiableUnit, unit.Name)) + } + } + } + } + return +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_2/types/unit.go b/vendor/github.com/coreos/ignition/v2/config/v3_2/types/unit.go index bc2d3299c..c5ee1e8e3 100644 --- a/vendor/github.com/coreos/ignition/v2/config/v3_2/types/unit.go +++ b/vendor/github.com/coreos/ignition/v2/config/v3_2/types/unit.go @@ -15,15 +15,13 @@ package types import ( - "fmt" "path" - "strings" "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/shared/parse" "github.com/coreos/ignition/v2/config/shared/validations" "github.com/coreos/ignition/v2/config/util" - "github.com/coreos/go-systemd/v22/unit" cpath "github.com/coreos/vcontext/path" "github.com/coreos/vcontext/report" ) @@ -39,7 +37,7 @@ func (d Dropin) Key() string { func (u Unit) Validate(c cpath.ContextPath) (r report.Report) { r.AddOnError(c.Append("name"), validateName(u.Name)) c = c.Append("contents") - opts, err := validateUnitContent(u.Contents) + opts, err := parse.ParseUnitContents(u.Contents) r.AddOnError(c, err) r.AddOnWarn(c, validations.ValidateInstallSection(u.Name, util.IsTrue(u.Enabled), util.NilOrEmpty(u.Contents), opts)) @@ -57,7 +55,7 @@ func validateName(name string) error { } func (d Dropin) Validate(c cpath.ContextPath) (r report.Report) { - _, err := validateUnitContent(d.Contents) + _, err := parse.ParseUnitContents(d.Contents) r.AddOnError(c.Append("contents"), err) switch path.Ext(d.Name) { @@ -68,15 +66,3 @@ func (d Dropin) Validate(c cpath.ContextPath) (r report.Report) { return } - -func validateUnitContent(content *string) ([]*unit.UnitOption, error) { - if content == nil { - return []*unit.UnitOption{}, nil - } - c := strings.NewReader(*content) - opts, err := unit.Deserialize(c) - if err != nil { - return nil, fmt.Errorf("invalid unit content: %s", err) - } - return opts, nil -} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/config.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/config.go new file mode 100644 index 000000000..446ea67d8 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/config.go @@ -0,0 +1,78 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package v3_3 + +import ( + "github.com/coreos/ignition/v2/config/merge" + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/util" + prev "github.com/coreos/ignition/v2/config/v3_2" + "github.com/coreos/ignition/v2/config/v3_3/translate" + "github.com/coreos/ignition/v2/config/v3_3/types" + "github.com/coreos/ignition/v2/config/validate" + + "github.com/coreos/go-semver/semver" + "github.com/coreos/vcontext/report" +) + +func Merge(parent, child types.Config) types.Config { + res, _ := merge.MergeStructTranscribe(parent, child) + return res.(types.Config) +} + +// Parse parses the raw config into a types.Config struct and generates a report of any +// errors, warnings, info, and deprecations it encountered +func Parse(rawConfig []byte) (types.Config, report.Report, error) { + if len(rawConfig) == 0 { + return types.Config{}, report.Report{}, errors.ErrEmpty + } + + var config types.Config + if rpt, err := util.HandleParseErrors(rawConfig, &config); err != nil { + return types.Config{}, rpt, err + } + + version, err := semver.NewVersion(config.Ignition.Version) + + if err != nil || *version != types.MaxVersion { + return types.Config{}, report.Report{}, errors.ErrUnknownVersion + } + + rpt := validate.ValidateWithContext(config, rawConfig) + if rpt.IsFatal() { + return types.Config{}, rpt, errors.ErrInvalid + } + + return config, rpt, nil +} + +// ParseCompatibleVersion parses the raw config of version 3.3.0 or +// lesser into a 3.3 types.Config struct and generates a report of any errors, +// warnings, info, and deprecations it encountered +func ParseCompatibleVersion(raw []byte) (types.Config, report.Report, error) { + version, rpt, err := util.GetConfigVersion(raw) + if err != nil { + return types.Config{}, rpt, err + } + + if version == types.MaxVersion { + return Parse(raw) + } + prevCfg, r, err := prev.ParseCompatibleVersion(raw) + if err != nil { + return types.Config{}, r, err + } + return translate.Translate(prevCfg), r, nil +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/translate/translate.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/translate/translate.go new file mode 100644 index 000000000..656ad0a4a --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/translate/translate.go @@ -0,0 +1,95 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package translate + +import ( + "github.com/coreos/ignition/v2/config/translate" + "github.com/coreos/ignition/v2/config/util" + old_types "github.com/coreos/ignition/v2/config/v3_2/types" + "github.com/coreos/ignition/v2/config/v3_3/types" +) + +func translateIgnition(old old_types.Ignition) (ret types.Ignition) { + // use a new translator so we don't recurse infinitely + translate.NewTranslator().Translate(&old, &ret) + ret.Version = types.MaxVersion.String() + return +} + +func translateRaid(old old_types.Raid) (ret types.Raid) { + tr := translate.NewTranslator() + tr.Translate(&old.Devices, &ret.Devices) + ret.Level = util.StrToPtr(old.Level) + tr.Translate(&old.Name, &ret.Name) + tr.Translate(&old.Options, &ret.Options) + tr.Translate(&old.Spares, &ret.Spares) + return +} + +func translateLuks(old old_types.Luks) (ret types.Luks) { + tr := translate.NewTranslator() + tr.AddCustomTranslator(translateClevis) + if old.Clevis != nil { + tr.Translate(old.Clevis, &ret.Clevis) + } + tr.Translate(&old.Device, &ret.Device) + tr.Translate(&old.KeyFile, &ret.KeyFile) + tr.Translate(&old.Label, &ret.Label) + tr.Translate(&old.Name, &ret.Name) + tr.Translate(&old.Options, &ret.Options) + tr.Translate(&old.UUID, &ret.UUID) + tr.Translate(&old.WipeVolume, &ret.WipeVolume) + return +} + +func translateClevis(old old_types.Clevis) (ret types.Clevis) { + tr := translate.NewTranslator() + tr.AddCustomTranslator(translateClevisCustom) + if old.Custom != nil { + tr.Translate(old.Custom, &ret.Custom) + } + tr.Translate(&old.Tang, &ret.Tang) + tr.Translate(&old.Threshold, &ret.Threshold) + tr.Translate(&old.Tpm2, &ret.Tpm2) + return +} + +func translateClevisCustom(old old_types.Custom) (ret types.ClevisCustom) { + tr := translate.NewTranslator() + ret.Config = util.StrToPtr(old.Config) + tr.Translate(&old.NeedsNetwork, &ret.NeedsNetwork) + ret.Pin = util.StrToPtr(old.Pin) + return +} + +func translateLinkEmbedded1(old old_types.LinkEmbedded1) (ret types.LinkEmbedded1) { + tr := translate.NewTranslator() + tr.Translate(&old.Hard, &ret.Hard) + ret.Target = util.StrToPtr(old.Target) + return +} + +func Translate(old old_types.Config) (ret types.Config) { + tr := translate.NewTranslator() + tr.AddCustomTranslator(translateIgnition) + tr.AddCustomTranslator(translateRaid) + tr.AddCustomTranslator(translateLuks) + tr.AddCustomTranslator(translateLinkEmbedded1) + tr.Translate(&old.Ignition, &ret.Ignition) + tr.Translate(&old.Passwd, &ret.Passwd) + tr.Translate(&old.Storage, &ret.Storage) + tr.Translate(&old.Systemd, &ret.Systemd) + return +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/types/clevis.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/clevis.go new file mode 100644 index 000000000..68887d434 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/clevis.go @@ -0,0 +1,49 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
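The custom translators above exist mostly because several fields were lifted from plain values in v3.2 to pointers in v3.3 (Raid.Level, Custom.Pin and Config, LinkEmbedded1.Target), hence the util.StrToPtr calls. A toy illustration of the lifting pattern, with simplified stand-in structs:

package main

import "fmt"

// Stand-ins for the real v3_2 and v3_3 Raid types; illustrative only.
type oldRaid struct{ Level string }
type newRaid struct{ Level *string }

func strToPtr(s string) *string { return &s }

func main() {
	old := oldRaid{Level: "raid1"}
	lifted := newRaid{Level: strToPtr(old.Level)}
	fmt.Println(*lifted.Level) // "raid1"
}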
+ +package types + +import ( + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/util" + + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (c Clevis) IsPresent() bool { + return util.NotEmpty(c.Custom.Pin) || + len(c.Tang) > 0 || + util.IsTrue(c.Tpm2) || + c.Threshold != nil && *c.Threshold != 0 +} + +func (cu ClevisCustom) Validate(c path.ContextPath) (r report.Report) { + if util.NilOrEmpty(cu.Pin) && util.NilOrEmpty(cu.Config) && !util.IsTrue(cu.NeedsNetwork) { + return + } + if util.NotEmpty(cu.Pin) { + switch *cu.Pin { + case "tpm2", "tang", "sss": + default: + r.AddOnError(c.Append("pin"), errors.ErrUnknownClevisPin) + } + } else { + r.AddOnError(c.Append("pin"), errors.ErrClevisPinRequired) + } + if util.NilOrEmpty(cu.Config) { + r.AddOnError(c.Append("config"), errors.ErrClevisConfigRequired) + } + return +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/types/config.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/config.go new file mode 100644 index 000000000..9158e7f01 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/config.go @@ -0,0 +1,64 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/util" + + "github.com/coreos/go-semver/semver" + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +var ( + MaxVersion = semver.Version{ + Major: 3, + Minor: 3, + } +) + +func (cfg Config) Validate(c path.ContextPath) (r report.Report) { + systemdPath := "/etc/systemd/system/" + unitPaths := map[string]struct{}{} + for _, unit := range cfg.Systemd.Units { + if !util.NilOrEmpty(unit.Contents) { + pathString := systemdPath + unit.Name + unitPaths[pathString] = struct{}{} + } + for _, dropin := range unit.Dropins { + if !util.NilOrEmpty(dropin.Contents) { + pathString := systemdPath + unit.Name + ".d/" + dropin.Name + unitPaths[pathString] = struct{}{} + } + } + } + for i, f := range cfg.Storage.Files { + if _, exists := unitPaths[f.Path]; exists { + r.AddOnError(c.Append("storage", "files", i, "path"), errors.ErrPathConflictsSystemd) + } + } + for i, d := range cfg.Storage.Directories { + if _, exists := unitPaths[d.Path]; exists { + r.AddOnError(c.Append("storage", "directories", i, "path"), errors.ErrPathConflictsSystemd) + } + } + for i, l := range cfg.Storage.Links { + if _, exists := unitPaths[l.Path]; exists { + r.AddOnError(c.Append("storage", "links", i, "path"), errors.ErrPathConflictsSystemd) + } + } + return +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/types/device.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/device.go new file mode 100644 index 000000000..a10ce97b0 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/device.go @@ -0,0 +1,25 @@ +// Copyright 2020 Red Hat, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (d Device) Validate(c path.ContextPath) (r report.Report) { + r.AddOnError(c, validatePath(string(d))) + return +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/types/directory.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/directory.go new file mode 100644 index 000000000..b01a6bf9d --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/directory.go @@ -0,0 +1,27 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (d Directory) Validate(c path.ContextPath) (r report.Report) { + r.Merge(d.Node.Validate(c)) + r.AddOnError(c.Append("mode"), validateMode(d.Mode)) + r.AddOnWarn(c.Append("mode"), validateModeSpecialBits(d.Mode)) + return +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/types/disk.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/disk.go new file mode 100644 index 000000000..8caf8499d --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/disk.go @@ -0,0 +1,135 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
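Directory.Validate above composes the shared Node checks with the mode helpers defined in mode.go later in this patch. A toy sketch of those two mode checks, with an illustrative mode value:

package main

import "fmt"

func main() {
	mode := 0o1777 // sticky bit set, e.g. a /tmp-style directory
	if mode < 0 || mode > 0o7777 {
		fmt.Println("error: illegal mode")
	}
	if mode&0o7000 != 0 {
		// Ignition only warns here: setuid/setgid/sticky bits are accepted
		// but flagged, since they are frequently unintentional.
		fmt.Println("warning: special bits set in mode")
	}
}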
+
+package types
+
+import (
+	"github.com/coreos/ignition/v2/config/shared/errors"
+	"github.com/coreos/ignition/v2/config/util"
+
+	"github.com/coreos/vcontext/path"
+	"github.com/coreos/vcontext/report"
+)
+
+func (d Disk) Key() string {
+	return d.Device
+}
+
+func (n Disk) Validate(c path.ContextPath) (r report.Report) {
+	if len(n.Device) == 0 {
+		r.AddOnError(c.Append("device"), errors.ErrDiskDeviceRequired)
+		return
+	}
+	r.AddOnError(c.Append("device"), validatePath(n.Device))
+
+	if collides, p := n.partitionNumbersCollide(); collides {
+		r.AddOnError(c.Append("partitions", p), errors.ErrPartitionNumbersCollide)
+	}
+	if overlaps, p := n.partitionsOverlap(); overlaps {
+		r.AddOnError(c.Append("partitions", p), errors.ErrPartitionsOverlap)
+	}
+	if n.partitionsMixZeroesAndNonexistence() {
+		r.AddOnError(c.Append("partitions"), errors.ErrZeroesWithShouldNotExist)
+	}
+	if collides, p := n.partitionLabelsCollide(); collides {
+		r.AddOnError(c.Append("partitions", p), errors.ErrDuplicateLabels)
+	}
+	return
+}
+
+// partitionNumbersCollide returns true if partition numbers in n.Partitions are not unique. It also returns the
+// index of the colliding partition.
+func (n Disk) partitionNumbersCollide() (bool, int) {
+	m := map[int][]int{} // from partition number to index into array
+	for i, p := range n.Partitions {
+		if p.Number != 0 {
+			// a number of 0 means next available number, multiple devices can specify this
+			m[p.Number] = append(m[p.Number], i)
+		}
+	}
+	for _, n := range m {
+		if len(n) > 1 {
+			// TODO(vc): return information describing the collision for logging
+			return true, n[1]
+		}
+	}
+	return false, 0
+}
+
+func (d Disk) partitionLabelsCollide() (bool, int) {
+	m := map[string]struct{}{}
+	for i, p := range d.Partitions {
+		if p.Label != nil {
+			// track labels we have already seen; a repeated label is a collision
+			if _, exists := m[*p.Label]; exists {
+				return true, i
+			}
+			m[*p.Label] = struct{}{}
+		}
+	}
+	return false, 0
+}
+
+// end returns the last MiB covered by a partition (inclusive). Only used by partitionsOverlap. Requires non-nil
+// StartMiB and SizeMiB.
+func (p Partition) end() int {
+	if *p.SizeMiB == 0 {
+		// a size of 0 means "fill available", just return the start as the end for those.
+		return *p.StartMiB
+	}
+	return *p.StartMiB + *p.SizeMiB - 1
+}
+
+// partitionsOverlap returns true if any explicitly dimensioned partitions overlap. It also returns the index of
+// the overlapping partition.
+func (n Disk) partitionsOverlap() (bool, int) {
+	for _, p := range n.Partitions {
+		// Starts of 0 are placed by sgdisk into the "largest available block" at that time.
+		// We aren't going to check those for overlap since we don't have the disk geometry.
+		if p.StartMiB == nil || p.SizeMiB == nil || *p.StartMiB == 0 {
+			continue
+		}
+
+		for i, o := range n.Partitions {
+			if o.StartMiB == nil || o.SizeMiB == nil || p == o || *o.StartMiB == 0 {
+				continue
+			}
+
+			// is p.StartMiB within o?
+			if *p.StartMiB >= *o.StartMiB && *p.StartMiB <= o.end() {
+				return true, i
+			}
+
+			// is p.end() within o?
+			if p.end() >= *o.StartMiB && p.end() <= o.end() {
+				return true, i
+			}
+
+			// do p.StartMiB and p.end() straddle o?
+ if *p.StartMiB < *o.StartMiB && p.end() > o.end() { + return true, i + } + } + } + return false, 0 +} + +func (n Disk) partitionsMixZeroesAndNonexistence() bool { + hasZero := false + hasShouldNotExist := false + for _, p := range n.Partitions { + hasShouldNotExist = hasShouldNotExist || util.IsFalse(p.ShouldExist) + hasZero = hasZero || (p.Number == 0) + } + return hasZero && hasShouldNotExist +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/types/file.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/file.go new file mode 100644 index 000000000..4e7566bd3 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/file.go @@ -0,0 +1,44 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/util" + + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (f File) Validate(c path.ContextPath) (r report.Report) { + r.Merge(f.Node.Validate(c)) + r.AddOnError(c.Append("mode"), validateMode(f.Mode)) + r.AddOnWarn(c.Append("mode"), validateModeSpecialBits(f.Mode)) + r.AddOnError(c.Append("overwrite"), f.validateOverwrite()) + return +} + +func (f File) validateOverwrite() error { + if util.IsTrue(f.Overwrite) && f.Contents.Source == nil { + return errors.ErrOverwriteAndNilSource + } + return nil +} + +func (f FileEmbedded1) IgnoreDuplicates() map[string]struct{} { + return map[string]struct{}{ + "Append": {}, + } +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/types/filesystem.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/filesystem.go new file mode 100644 index 000000000..c722b3633 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/filesystem.go @@ -0,0 +1,106 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
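The overlap detection above is interval arithmetic on StartMiB/SizeMiB, where end() is start + size - 1 and a size of 0 collapses to the start. A self-contained sketch of the same rule, with simplified types and illustrative numbers:

package main

import "fmt"

type part struct{ start, size int } // MiB, both explicitly set

func end(p part) int {
	if p.size == 0 {
		return p.start // "fill available": treated as a point
	}
	return p.start + p.size - 1
}

func overlaps(a, b part) bool {
	return (a.start >= b.start && a.start <= end(b)) || // a starts inside b
		(end(a) >= b.start && end(a) <= end(b)) || // a ends inside b
		(a.start < b.start && end(a) > end(b)) // a straddles b
}

func main() {
	a := part{start: 1, size: 100}  // covers MiB 1..100
	b := part{start: 50, size: 100} // covers MiB 50..149
	fmt.Println(overlaps(a, b))     // true
}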
+ +package types + +import ( + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/util" + + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (f Filesystem) Key() string { + return f.Device +} + +func (f Filesystem) IgnoreDuplicates() map[string]struct{} { + return map[string]struct{}{ + "Options": {}, + "MountOptions": {}, + } +} + +func (f Filesystem) Validate(c path.ContextPath) (r report.Report) { + r.AddOnError(c.Append("path"), f.validatePath()) + r.AddOnError(c.Append("device"), validatePath(f.Device)) + r.AddOnError(c.Append("format"), f.validateFormat()) + r.AddOnError(c.Append("label"), f.validateLabel()) + return +} + +func (f Filesystem) validatePath() error { + return validatePathNilOK(f.Path) +} + +func (f Filesystem) validateFormat() error { + if util.NilOrEmpty(f.Format) { + if util.NotEmpty(f.Path) || + util.NotEmpty(f.Label) || + util.NotEmpty(f.UUID) || + util.IsTrue(f.WipeFilesystem) || + len(f.MountOptions) != 0 || + len(f.Options) != 0 { + return errors.ErrFormatNilWithOthers + } + } else { + switch *f.Format { + case "ext4", "btrfs", "xfs", "swap", "vfat", "none": + default: + return errors.ErrFilesystemInvalidFormat + } + } + return nil +} + +func (f Filesystem) validateLabel() error { + if util.NilOrEmpty(f.Label) { + return nil + } + if util.NilOrEmpty(f.Format) { + return errors.ErrLabelNeedsFormat + } + + switch *f.Format { + case "ext4": + if len(*f.Label) > 16 { + // source: man mkfs.ext4 + return errors.ErrExt4LabelTooLong + } + case "btrfs": + if len(*f.Label) > 256 { + // source: man mkfs.btrfs + return errors.ErrBtrfsLabelTooLong + } + case "xfs": + if len(*f.Label) > 12 { + // source: man mkfs.xfs + return errors.ErrXfsLabelTooLong + } + case "swap": + // mkswap's man page does not state a limit on label size, but through + // experimentation it appears that mkswap will truncate long labels to + // 15 characters, so let's enforce that. + if len(*f.Label) > 15 { + return errors.ErrSwapLabelTooLong + } + case "vfat": + if len(*f.Label) > 11 { + // source: man mkfs.fat + return errors.ErrVfatLabelTooLong + } + } + return nil +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/types/headers.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/headers.go new file mode 100644 index 000000000..be1aadad9 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/headers.go @@ -0,0 +1,65 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
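The per-format label limits enforced by validateLabel above, collected into a single lookup for reference; the numbers mirror the switch in filesystem.go and the label value is illustrative:

package main

import "fmt"

// Maximum label lengths per filesystem format, as validated above.
var labelLimit = map[string]int{
	"ext4":  16,  // man mkfs.ext4
	"btrfs": 256, // man mkfs.btrfs
	"xfs":   12,  // man mkfs.xfs
	"swap":  15,  // observed mkswap truncation
	"vfat":  11,  // man mkfs.fat
}

func main() {
	label := "a-rather-long-label" // 19 characters
	if limit, ok := labelLimit["ext4"]; ok && len(label) > limit {
		fmt.Printf("ext4 label too long: %d > %d\n", len(label), limit)
	}
}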
+ +package types + +import ( + "net/http" + + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +// Parse generates standard net/http headers from the data in HTTPHeaders +func (hs HTTPHeaders) Parse() (http.Header, error) { + headers := http.Header{} + for _, header := range hs { + if header.Name == "" { + return nil, errors.ErrEmptyHTTPHeaderName + } + if header.Value == nil || string(*header.Value) == "" { + return nil, errors.ErrInvalidHTTPHeader + } + headers.Add(header.Name, string(*header.Value)) + } + return headers, nil +} + +func (h HTTPHeader) Validate(c path.ContextPath) (r report.Report) { + r.AddOnError(c.Append("name"), h.validateName()) + r.AddOnError(c.Append("value"), h.validateValue()) + return +} + +func (h HTTPHeader) validateName() error { + if h.Name == "" { + return errors.ErrEmptyHTTPHeaderName + } + return nil +} + +func (h HTTPHeader) validateValue() error { + if h.Value == nil { + return nil + } + if string(*h.Value) == "" { + return errors.ErrInvalidHTTPHeader + } + return nil +} + +func (h HTTPHeader) Key() string { + return h.Name +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/types/ignition.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/ignition.go new file mode 100644 index 000000000..190445bda --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/ignition.go @@ -0,0 +1,49 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "github.com/coreos/go-semver/semver" + + "github.com/coreos/ignition/v2/config/shared/errors" + + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (v Ignition) Semver() (*semver.Version, error) { + return semver.NewVersion(v.Version) +} + +func (ic IgnitionConfig) Validate(c path.ContextPath) (r report.Report) { + for i, res := range ic.Merge { + r.AddOnError(c.Append("merge", i), res.validateRequiredSource()) + } + return +} + +func (v Ignition) Validate(c path.ContextPath) (r report.Report) { + c = c.Append("version") + tv, err := v.Semver() + if err != nil { + r.AddOnError(c, errors.ErrInvalidVersion) + return + } + + if MaxVersion != *tv { + r.AddOnError(c, errors.ErrUnknownVersion) + } + return +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/types/kargs.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/kargs.go new file mode 100644 index 000000000..42c29408e --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/kargs.go @@ -0,0 +1,22 @@ +// Copyright 2021 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +func (k KernelArguments) MergedKeys() map[string]string { + return map[string]string{ + "ShouldExist": "KernelArgument", + "ShouldNotExist": "KernelArgument", + } +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/types/luks.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/luks.go new file mode 100644 index 000000000..90d723870 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/luks.go @@ -0,0 +1,71 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "strings" + + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/util" + + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (l Luks) Key() string { + return l.Name +} + +func (l Luks) IgnoreDuplicates() map[string]struct{} { + return map[string]struct{}{ + "Options": {}, + } +} + +func (l Luks) Validate(c path.ContextPath) (r report.Report) { + if strings.Contains(l.Name, "/") { + r.AddOnError(c.Append("name"), errors.ErrLuksNameContainsSlash) + } + r.AddOnError(c.Append("label"), l.validateLabel()) + if util.NilOrEmpty(l.Device) { + r.AddOnError(c.Append("device"), errors.ErrDiskDeviceRequired) + } else { + r.AddOnError(c.Append("device"), validatePath(*l.Device)) + } + + if util.NotEmpty(l.Clevis.Custom.Pin) && (len(l.Clevis.Tang) > 0 || util.IsTrue(l.Clevis.Tpm2) || (l.Clevis.Threshold != nil && *l.Clevis.Threshold != 0)) { + r.AddOnError(c.Append("clevis"), errors.ErrClevisCustomWithOthers) + } + + // fail if a key file is provided and is not valid + if err := validateURLNilOK(l.KeyFile.Source); err != nil { + r.AddOnError(c.Append("keys"), errors.ErrInvalidLuksKeyFile) + } + return +} + +func (l Luks) validateLabel() error { + if util.NilOrEmpty(l.Label) { + return nil + } + + if len(*l.Label) > 47 { + // LUKS2_LABEL_L has a maximum length of 48 (including the null terminator) + // https://gitlab.com/cryptsetup/cryptsetup/-/blob/1633f030e89ad2f11ae649ba9600997a41abd3fc/lib/luks2/luks2.h#L86 + return errors.ErrLuksLabelTooLong + } + + return nil +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/types/mode.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/mode.go new file mode 100644 index 000000000..ad3e51c22 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/mode.go @@ -0,0 +1,36 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "github.com/coreos/ignition/v2/config/shared/errors" +) + +func validateMode(m *int) error { + if m != nil && (*m < 0 || *m > 07777) { + return errors.ErrFileIllegalMode + } + return nil +} + +func validateModeSpecialBits(m *int) error { + if m != nil { + mode := uint32(*m) + if mode&07000 != 0 { + return errors.ErrModeSpecialBits + } + } + return nil +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/types/node.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/node.go new file mode 100644 index 000000000..248276e73 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/node.go @@ -0,0 +1,59 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "path" + + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/util" + + vpath "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (n Node) Key() string { + return n.Path +} + +func (n Node) Validate(c vpath.ContextPath) (r report.Report) { + r.AddOnError(c.Append("path"), validatePath(n.Path)) + return +} + +func (n Node) Depth() int { + count := 0 + for p := path.Clean(string(n.Path)); p != "/"; count++ { + p = path.Dir(p) + } + return count +} + +func validateIDorName(id *int, name *string) error { + if id != nil && util.NotEmpty(name) { + return errors.ErrBothIDAndNameSet + } + return nil +} + +func (nu NodeUser) Validate(c vpath.ContextPath) (r report.Report) { + r.AddOnError(c, validateIDorName(nu.ID, nu.Name)) + return +} + +func (ng NodeGroup) Validate(c vpath.ContextPath) (r report.Report) { + r.AddOnError(c, validateIDorName(ng.ID, ng.Name)) + return +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/types/partition.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/partition.go new file mode 100644 index 000000000..1b2d97edf --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/partition.go @@ -0,0 +1,91 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/coreos/ignition/v2/config/shared/errors"
+	"github.com/coreos/ignition/v2/config/util"
+
+	"github.com/coreos/vcontext/path"
+	"github.com/coreos/vcontext/report"
+)
+
+const (
+	guidRegexStr = "^(|[[:xdigit:]]{8}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{12})$"
+)
+
+var (
+	guidRegex = regexp.MustCompile(guidRegexStr)
+)
+
+func (p Partition) Key() string {
+	if p.Number != 0 {
+		return fmt.Sprintf("number:%d", p.Number)
+	} else if p.Label != nil {
+		return fmt.Sprintf("label:%s", *p.Label)
+	} else {
+		return ""
+	}
+}
+
+func (p Partition) Validate(c path.ContextPath) (r report.Report) {
+	if util.IsFalse(p.ShouldExist) &&
+		(p.Label != nil || util.NotEmpty(p.TypeGUID) || util.NotEmpty(p.GUID) || p.StartMiB != nil || p.SizeMiB != nil) {
+		r.AddOnError(c, errors.ErrShouldNotExistWithOthers)
+	}
+	if p.Number == 0 && p.Label == nil {
+		r.AddOnError(c, errors.ErrNeedLabelOrNumber)
+	}
+
+	r.AddOnError(c.Append("label"), p.validateLabel())
+	r.AddOnError(c.Append("guid"), validateGUID(p.GUID))
+	r.AddOnError(c.Append("typeGuid"), validateGUID(p.TypeGUID))
+	return
+}
+
+func (p Partition) validateLabel() error {
+	if p.Label == nil {
+		return nil
+	}
+	// http://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_entries:
+	// 56 (0x38) 72 bytes Partition name (36 UTF-16LE code units)
+
+	// XXX(vc): note GPT calls it a name, we're using label for consistency
+	// with udev naming /dev/disk/by-partlabel/*.
+	if len(*p.Label) > 36 {
+		return errors.ErrLabelTooLong
+	}
+
+	// sgdisk uses colons for delimiting compound arguments and does not allow escaping them.
+	if strings.Contains(*p.Label, ":") {
+		return errors.ErrLabelContainsColon
+	}
+	return nil
+}
+
+func validateGUID(guidPointer *string) error {
+	if guidPointer == nil {
+		return nil
+	}
+	guid := *guidPointer
+	if ok := guidRegex.MatchString(guid); !ok {
+		return errors.ErrDoesntMatchGUIDRegex
+	}
+	return nil
+}
diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/types/passwd.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/passwd.go
new file mode 100644
index 000000000..4060a2a6f
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/passwd.go
@@ -0,0 +1,23 @@
+// Copyright 2020 Red Hat, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+func (p PasswdUser) Key() string {
+	return p.Name
+}
+
+func (g PasswdGroup) Key() string {
+	return g.Name
+}
diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/types/path.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/path.go
new file mode 100644
index 000000000..131e300c1
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/path.go
@@ -0,0 +1,42 @@
+// Copyright 2020 Red Hat, Inc.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "path" + + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/util" +) + +func validatePath(p string) error { + if p == "" { + return errors.ErrNoPath + } + if !path.IsAbs(p) { + return errors.ErrPathRelative + } + if path.Clean(p) != p { + return errors.ErrDirtyPath + } + return nil +} + +func validatePathNilOK(p *string) error { + if util.NilOrEmpty(p) { + return nil + } + return validatePath(*p) +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/types/proxy.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/proxy.go new file mode 100644 index 000000000..d48d210a0 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/proxy.go @@ -0,0 +1,49 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "net/url" + + "github.com/coreos/ignition/v2/config/shared/errors" + + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (p Proxy) Validate(c path.ContextPath) (r report.Report) { + validateProxyURL(p.HTTPProxy, c.Append("httpProxy"), &r, true) + validateProxyURL(p.HTTPSProxy, c.Append("httpsProxy"), &r, false) + return +} + +func validateProxyURL(s *string, p path.ContextPath, r *report.Report, httpOk bool) { + if s == nil { + return + } + u, err := url.Parse(*s) + if err != nil { + r.AddOnError(p, errors.ErrInvalidUrl) + return + } + + if u.Scheme != "https" && u.Scheme != "http" { + r.AddOnError(p, errors.ErrInvalidProxy) + return + } + if u.Scheme == "http" && !httpOk { + r.AddOnWarn(p, errors.ErrInsecureProxy) + } +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/types/raid.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/raid.go new file mode 100644 index 000000000..9d69aa366 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/raid.go @@ -0,0 +1,62 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/util" + + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (r Raid) Key() string { + return r.Name +} + +func (r Raid) IgnoreDuplicates() map[string]struct{} { + return map[string]struct{}{ + "Options": {}, + } +} + +func (ra Raid) Validate(c path.ContextPath) (r report.Report) { + r.AddOnError(c.Append("level"), ra.validateLevel()) + if len(ra.Devices) == 0 { + r.AddOnError(c.Append("devices"), errors.ErrRaidDevicesRequired) + } + return +} + +func (r Raid) validateLevel() error { + if util.NilOrEmpty(r.Level) { + return errors.ErrRaidLevelRequired + } + switch *r.Level { + case "linear", "raid0", "0", "stripe": + if r.Spares != nil && *r.Spares != 0 { + return errors.ErrSparesUnsupportedForLevel + } + case "raid1", "1", "mirror": + case "raid4", "4": + case "raid5", "5": + case "raid6", "6": + case "raid10", "10": + default: + return errors.ErrUnrecognizedRaidLevel + } + + return nil +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/types/resource.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/resource.go new file mode 100644 index 000000000..68da6c7b7 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/resource.go @@ -0,0 +1,91 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
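One rule above is easy to miss: spares are rejected for non-redundant levels. A minimal sketch of that check, with a stand-in function:

package main

import "fmt"

// sparesAllowed mirrors the level switch in Raid.validateLevel above:
// linear and striped arrays have no redundancy, so spares make no sense.
func sparesAllowed(level string) bool {
	switch level {
	case "linear", "raid0", "0", "stripe":
		return false
	default:
		return true
	}
}

func main() {
	fmt.Println(sparesAllowed("raid0")) // false
	fmt.Println(sparesAllowed("raid1")) // true
}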
+ +package types + +import ( + "net/url" + + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/util" + + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (res Resource) Key() string { + if res.Source == nil { + return "" + } + return *res.Source +} + +func (res Resource) Validate(c path.ContextPath) (r report.Report) { + r.AddOnError(c.Append("compression"), res.validateCompression()) + r.AddOnError(c.Append("verification", "hash"), res.validateVerification()) + r.AddOnError(c.Append("source"), validateURLNilOK(res.Source)) + r.AddOnError(c.Append("httpHeaders"), res.validateSchemeForHTTPHeaders()) + return +} + +func (res Resource) validateCompression() error { + if res.Compression != nil { + switch *res.Compression { + case "", "gzip": + default: + return errors.ErrCompressionInvalid + } + } + return nil +} + +func (res Resource) validateVerification() error { + if res.Verification.Hash != nil && res.Source == nil { + return errors.ErrVerificationAndNilSource + } + return nil +} + +func (res Resource) validateSchemeForHTTPHeaders() error { + if len(res.HTTPHeaders) < 1 { + return nil + } + + if util.NilOrEmpty(res.Source) { + return errors.ErrInvalidUrl + } + + u, err := url.Parse(*res.Source) + if err != nil { + return errors.ErrInvalidUrl + } + + switch u.Scheme { + case "http", "https": + return nil + default: + return errors.ErrUnsupportedSchemeForHTTPHeaders + } +} + +// Ensure that the Source is specified and valid. This is not called by +// Resource.Validate() because some structs that embed Resource don't +// require Source to be specified. Containing structs that require Source +// should call this function from their Validate(). +func (res Resource) validateRequiredSource() error { + if util.NilOrEmpty(res.Source) { + return errors.ErrSourceRequired + } + return validateURL(*res.Source) +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/types/schema.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/schema.go new file mode 100644 index 000000000..8722944f1 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/schema.go @@ -0,0 +1,254 @@ +package types + +// generated by "schematyper --package=types config/v3_3/schema/ignition.json -o config/v3_3/types/schema.go --root-type=Config" -- DO NOT EDIT + +type Clevis struct { + Custom ClevisCustom `json:"custom,omitempty"` + Tang []Tang `json:"tang,omitempty"` + Threshold *int `json:"threshold,omitempty"` + Tpm2 *bool `json:"tpm2,omitempty"` +} + +type ClevisCustom struct { + Config *string `json:"config,omitempty"` + NeedsNetwork *bool `json:"needsNetwork,omitempty"` + Pin *string `json:"pin,omitempty"` +} + +type Config struct { + Ignition Ignition `json:"ignition"` + KernelArguments KernelArguments `json:"kernelArguments,omitempty"` + Passwd Passwd `json:"passwd,omitempty"` + Storage Storage `json:"storage,omitempty"` + Systemd Systemd `json:"systemd,omitempty"` +} + +type Device string + +type Directory struct { + Node + DirectoryEmbedded1 +} + +type DirectoryEmbedded1 struct { + Mode *int `json:"mode,omitempty"` +} + +type Disk struct { + Device string `json:"device"` + Partitions []Partition `json:"partitions,omitempty"` + WipeTable *bool `json:"wipeTable,omitempty"` +} + +type Dropin struct { + Contents *string `json:"contents,omitempty"` + Name string `json:"name"` +} + +type File struct { + Node + FileEmbedded1 +} + +type FileEmbedded1 struct { + Append []Resource `json:"append,omitempty"` + Contents 
Resource `json:"contents,omitempty"` + Mode *int `json:"mode,omitempty"` +} + +type Filesystem struct { + Device string `json:"device"` + Format *string `json:"format,omitempty"` + Label *string `json:"label,omitempty"` + MountOptions []MountOption `json:"mountOptions,omitempty"` + Options []FilesystemOption `json:"options,omitempty"` + Path *string `json:"path,omitempty"` + UUID *string `json:"uuid,omitempty"` + WipeFilesystem *bool `json:"wipeFilesystem,omitempty"` +} + +type FilesystemOption string + +type Group string + +type HTTPHeader struct { + Name string `json:"name"` + Value *string `json:"value,omitempty"` +} + +type HTTPHeaders []HTTPHeader + +type Ignition struct { + Config IgnitionConfig `json:"config,omitempty"` + Proxy Proxy `json:"proxy,omitempty"` + Security Security `json:"security,omitempty"` + Timeouts Timeouts `json:"timeouts,omitempty"` + Version string `json:"version,omitempty"` +} + +type IgnitionConfig struct { + Merge []Resource `json:"merge,omitempty"` + Replace Resource `json:"replace,omitempty"` +} + +type KernelArgument string + +type KernelArguments struct { + ShouldExist []KernelArgument `json:"shouldExist,omitempty"` + ShouldNotExist []KernelArgument `json:"shouldNotExist,omitempty"` +} + +type Link struct { + Node + LinkEmbedded1 +} + +type LinkEmbedded1 struct { + Hard *bool `json:"hard,omitempty"` + Target *string `json:"target,omitempty"` +} + +type Luks struct { + Clevis Clevis `json:"clevis,omitempty"` + Device *string `json:"device,omitempty"` + KeyFile Resource `json:"keyFile,omitempty"` + Label *string `json:"label,omitempty"` + Name string `json:"name"` + Options []LuksOption `json:"options,omitempty"` + UUID *string `json:"uuid,omitempty"` + WipeVolume *bool `json:"wipeVolume,omitempty"` +} + +type LuksOption string + +type MountOption string + +type NoProxyItem string + +type Node struct { + Group NodeGroup `json:"group,omitempty"` + Overwrite *bool `json:"overwrite,omitempty"` + Path string `json:"path"` + User NodeUser `json:"user,omitempty"` +} + +type NodeGroup struct { + ID *int `json:"id,omitempty"` + Name *string `json:"name,omitempty"` +} + +type NodeUser struct { + ID *int `json:"id,omitempty"` + Name *string `json:"name,omitempty"` +} + +type Partition struct { + GUID *string `json:"guid,omitempty"` + Label *string `json:"label,omitempty"` + Number int `json:"number,omitempty"` + Resize *bool `json:"resize,omitempty"` + ShouldExist *bool `json:"shouldExist,omitempty"` + SizeMiB *int `json:"sizeMiB,omitempty"` + StartMiB *int `json:"startMiB,omitempty"` + TypeGUID *string `json:"typeGuid,omitempty"` + WipePartitionEntry *bool `json:"wipePartitionEntry,omitempty"` +} + +type Passwd struct { + Groups []PasswdGroup `json:"groups,omitempty"` + Users []PasswdUser `json:"users,omitempty"` +} + +type PasswdGroup struct { + Gid *int `json:"gid,omitempty"` + Name string `json:"name"` + PasswordHash *string `json:"passwordHash,omitempty"` + ShouldExist *bool `json:"shouldExist,omitempty"` + System *bool `json:"system,omitempty"` +} + +type PasswdUser struct { + Gecos *string `json:"gecos,omitempty"` + Groups []Group `json:"groups,omitempty"` + HomeDir *string `json:"homeDir,omitempty"` + Name string `json:"name"` + NoCreateHome *bool `json:"noCreateHome,omitempty"` + NoLogInit *bool `json:"noLogInit,omitempty"` + NoUserGroup *bool `json:"noUserGroup,omitempty"` + PasswordHash *string `json:"passwordHash,omitempty"` + PrimaryGroup *string `json:"primaryGroup,omitempty"` + SSHAuthorizedKeys []SSHAuthorizedKey `json:"sshAuthorizedKeys,omitempty"` + 
Shell *string `json:"shell,omitempty"` + ShouldExist *bool `json:"shouldExist,omitempty"` + System *bool `json:"system,omitempty"` + UID *int `json:"uid,omitempty"` +} + +type Proxy struct { + HTTPProxy *string `json:"httpProxy,omitempty"` + HTTPSProxy *string `json:"httpsProxy,omitempty"` + NoProxy []NoProxyItem `json:"noProxy,omitempty"` +} + +type Raid struct { + Devices []Device `json:"devices,omitempty"` + Level *string `json:"level,omitempty"` + Name string `json:"name"` + Options []RaidOption `json:"options,omitempty"` + Spares *int `json:"spares,omitempty"` +} + +type RaidOption string + +type Resource struct { + Compression *string `json:"compression,omitempty"` + HTTPHeaders HTTPHeaders `json:"httpHeaders,omitempty"` + Source *string `json:"source,omitempty"` + Verification Verification `json:"verification,omitempty"` +} + +type SSHAuthorizedKey string + +type Security struct { + TLS TLS `json:"tls,omitempty"` +} + +type Storage struct { + Directories []Directory `json:"directories,omitempty"` + Disks []Disk `json:"disks,omitempty"` + Files []File `json:"files,omitempty"` + Filesystems []Filesystem `json:"filesystems,omitempty"` + Links []Link `json:"links,omitempty"` + Luks []Luks `json:"luks,omitempty"` + Raid []Raid `json:"raid,omitempty"` +} + +type Systemd struct { + Units []Unit `json:"units,omitempty"` +} + +type TLS struct { + CertificateAuthorities []Resource `json:"certificateAuthorities,omitempty"` +} + +type Tang struct { + Thumbprint *string `json:"thumbprint,omitempty"` + URL string `json:"url,omitempty"` +} + +type Timeouts struct { + HTTPResponseHeaders *int `json:"httpResponseHeaders,omitempty"` + HTTPTotal *int `json:"httpTotal,omitempty"` +} + +type Unit struct { + Contents *string `json:"contents,omitempty"` + Dropins []Dropin `json:"dropins,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + Mask *bool `json:"mask,omitempty"` + Name string `json:"name"` +} + +type Verification struct { + Hash *string `json:"hash,omitempty"` +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/types/storage.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/storage.go new file mode 100644 index 000000000..20cb73048 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/storage.go @@ -0,0 +1,115 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
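A sketch of building and serializing a minimal config with the generated v3_3 types above. It assumes the pointer helpers util.StrToPtr and util.IntToPtr from the vendored config/util package; the path and data URL are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/coreos/ignition/v2/config/util"
	"github.com/coreos/ignition/v2/config/v3_3/types"
)

func main() {
	cfg := types.Config{
		Ignition: types.Ignition{Version: "3.3.0"},
		Storage: types.Storage{
			Files: []types.File{{
				Node: types.Node{Path: "/etc/motd"},
				FileEmbedded1: types.FileEmbedded1{
					Contents: types.Resource{
						Source: util.StrToPtr("data:,hello%20world"),
					},
					Mode: util.IntToPtr(0o644),
				},
			}},
		},
	}
	out, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}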
+ +package types + +import ( + "path" + "strings" + + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/util" + + vpath "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (s Storage) MergedKeys() map[string]string { + return map[string]string{ + "Directories": "Node", + "Files": "Node", + "Links": "Node", + } +} + +func (s Storage) Validate(c vpath.ContextPath) (r report.Report) { + s.validateDirectories(c, &r) + s.validateFiles(c, &r) + s.validateLinks(c, &r) + s.validateFilesystems(c, &r) + return +} + +func (s Storage) validateDirectories(c vpath.ContextPath, r *report.Report) { + for i, d := range s.Directories { + for _, l := range s.Links { + if strings.HasPrefix(d.Path, l.Path+"/") { + r.AddOnError(c.Append("directories", i), errors.ErrDirectoryUsedSymlink) + } + } + } +} + +func (s Storage) validateFiles(c vpath.ContextPath, r *report.Report) { + for i, f := range s.Files { + for _, l := range s.Links { + if strings.HasPrefix(f.Path, l.Path+"/") { + r.AddOnError(c.Append("files", i), errors.ErrFileUsedSymlink) + } + } + } +} + +func (s Storage) validateLinks(c vpath.ContextPath, r *report.Report) { + for i, l1 := range s.Links { + for _, l2 := range s.Links { + if strings.HasPrefix(l1.Path, l2.Path+"/") { + r.AddOnError(c.Append("links", i), errors.ErrLinkUsedSymlink) + } + } + if util.NilOrEmpty(l1.Target) { + r.AddOnError(c.Append("links", i, "target"), errors.ErrLinkTargetRequired) + continue + } + if !util.IsTrue(l1.Hard) { + continue + } + target := path.Clean(*l1.Target) + if !path.IsAbs(target) { + target = path.Join(l1.Path, *l1.Target) + } + for _, d := range s.Directories { + if target == d.Path { + r.AddOnError(c.Append("links", i), errors.ErrHardLinkToDirectory) + } + } + ownerCheck := func(ok bool, path vpath.ContextPath) { + if !ok { + r.AddOnWarn(path, errors.ErrHardLinkSpecifiesOwner) + } + } + ownerCheck(l1.User.ID == nil, c.Append("links", i, "user", "id")) + ownerCheck(l1.User.Name == nil, c.Append("links", i, "user", "name")) + ownerCheck(l1.Group.ID == nil, c.Append("links", i, "group", "id")) + ownerCheck(l1.Group.Name == nil, c.Append("links", i, "group", "name")) + } +} + +func (s Storage) validateFilesystems(c vpath.ContextPath, r *report.Report) { + disks := make(map[string]Disk) + for _, d := range s.Disks { + disks[d.Device] = d + } + + for i, f := range s.Filesystems { + disk, exist := disks[f.Device] + if exist { + if len(disk.Partitions) > 0 { + r.AddOnWarn(c.Append("filesystems", i, "device"), errors.ErrPartitionsOverwritten) + } else if !util.IsTrue(f.WipeFilesystem) && util.IsTrue(disk.WipeTable) { + r.AddOnWarn(c.Append("filesystems", i, "device"), errors.ErrFilesystemImplicitWipe) + } + } + } +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/types/systemd.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/systemd.go new file mode 100644 index 000000000..ac521ba73 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/systemd.go @@ -0,0 +1,61 @@ +// Copyright 2022 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "regexp" + + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/shared/parse" + "github.com/coreos/ignition/v2/config/util" + + vpath "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (s Systemd) Validate(c vpath.ContextPath) (r report.Report) { + units := make(map[string]Unit) + checkInstanceUnit := regexp.MustCompile(`^(.+?)@(.+?)\.service$`) + for _, d := range s.Units { + units[d.Name] = d + } + for index, unit := range s.Units { + if checkInstanceUnit.MatchString(unit.Name) && util.IsTrue(unit.Enabled) { + instUnitSlice := checkInstanceUnit.FindSubmatch([]byte(unit.Name)) + instantiableUnit := string(instUnitSlice[1]) + "@.service" + if _, ok := units[instantiableUnit]; ok && util.NotEmpty(units[instantiableUnit].Contents) { + foundInstallSection := false + // we're doing a separate validation pass on each unit to identify + // if an instantiable unit has the install section. So logging an + // `AddOnError` will produce duplicate errors on bad unit contents + // because we're already doing that while validating a unit separately. + opts, err := parse.ParseUnitContents(units[instantiableUnit].Contents) + if err != nil { + continue + } + for _, section := range opts { + if section.Section == "Install" { + foundInstallSection = true + break + } + } + if !foundInstallSection { + r.AddOnWarn(c.Append("units", index, "contents"), errors.NewNoInstallSectionForInstantiableUnitError(instantiableUnit, unit.Name)) + } + } + } + } + return +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/types/tang.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/tang.go new file mode 100644 index 000000000..86ab79c9e --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/tang.go @@ -0,0 +1,51 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
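The regex above distinguishes an instantiated unit (echo@foo.service) from its instantiable template (echo@.service); the warning fires when the template has contents but no [Install] section, since enabling the instance would then do nothing. A small sketch of the name mapping, with an illustrative unit name:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	checkInstanceUnit := regexp.MustCompile(`^(.+?)@(.+?)\.service$`)
	name := "echo@foo.service" // illustrative instantiated unit
	if m := checkInstanceUnit.FindStringSubmatch(name); m != nil {
		// m[1] is the template prefix, m[2] the instance name.
		fmt.Println(m[1] + "@.service") // "echo@.service"
	}
}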
+ +package types + +import ( + "net/url" + + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/util" + + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (t Tang) Key() string { + return t.URL +} + +func (t Tang) Validate(c path.ContextPath) (r report.Report) { + r.AddOnError(c.Append("url"), validateTangURL(t.URL)) + if util.NilOrEmpty(t.Thumbprint) { + r.AddOnError(c.Append("thumbprint"), errors.ErrTangThumbprintRequired) + } + return +} + +func validateTangURL(s string) error { + u, err := url.Parse(s) + if err != nil { + return errors.ErrInvalidUrl + } + + switch u.Scheme { + case "http", "https": + return nil + default: + return errors.ErrInvalidScheme + } +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/types/tls.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/tls.go new file mode 100644 index 000000000..8890e397e --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/tls.go @@ -0,0 +1,27 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (tls TLS) Validate(c path.ContextPath) (r report.Report) { + for i, ca := range tls.CertificateAuthorities { + r.AddOnError(c.Append("certificateAuthorities", i), ca.validateRequiredSource()) + } + return +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/types/unit.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/unit.go new file mode 100644 index 000000000..c5ee1e8e3 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/unit.go @@ -0,0 +1,68 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package types + +import ( + "path" + + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/shared/parse" + "github.com/coreos/ignition/v2/config/shared/validations" + "github.com/coreos/ignition/v2/config/util" + + cpath "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (u Unit) Key() string { + return u.Name +} + +func (d Dropin) Key() string { + return d.Name +} + +func (u Unit) Validate(c cpath.ContextPath) (r report.Report) { + r.AddOnError(c.Append("name"), validateName(u.Name)) + c = c.Append("contents") + opts, err := parse.ParseUnitContents(u.Contents) + r.AddOnError(c, err) + + r.AddOnWarn(c, validations.ValidateInstallSection(u.Name, util.IsTrue(u.Enabled), util.NilOrEmpty(u.Contents), opts)) + + return +} + +func validateName(name string) error { + switch path.Ext(name) { + case ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice", ".scope": + default: + return errors.ErrInvalidSystemdExt + } + return nil +} + +func (d Dropin) Validate(c cpath.ContextPath) (r report.Report) { + _, err := parse.ParseUnitContents(d.Contents) + r.AddOnError(c.Append("contents"), err) + + switch path.Ext(d.Name) { + case ".conf": + default: + r.AddOnError(c.Append("name"), errors.ErrInvalidSystemdDropinExt) + } + + return +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/types/url.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/url.go new file mode 100644 index 000000000..0d8771bf6 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/url.go @@ -0,0 +1,57 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "net/url" + + "github.com/vincent-petithory/dataurl" + + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/util" +) + +func validateURL(s string) error { + u, err := url.Parse(s) + if err != nil { + return errors.ErrInvalidUrl + } + + switch u.Scheme { + case "http", "https", "tftp", "gs": + return nil + case "s3": + if v, ok := u.Query()["versionId"]; ok { + if len(v) == 0 || v[0] == "" { + return errors.ErrInvalidS3ObjectVersionId + } + } + return nil + case "data": + if _, err := dataurl.DecodeString(s); err != nil { + return err + } + return nil + default: + return errors.ErrInvalidScheme + } +} + +func validateURLNilOK(s *string) error { + if util.NilOrEmpty(s) { + return nil + } + return validateURL(*s) +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_3/types/verification.go b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/verification.go new file mode 100644 index 000000000..5def6f04b --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_3/types/verification.go @@ -0,0 +1,71 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"crypto"
+	"encoding/hex"
+	"strings"
+
+	"github.com/coreos/ignition/v2/config/shared/errors"
+
+	"github.com/coreos/vcontext/path"
+	"github.com/coreos/vcontext/report"
+)
+
+// HashParts will return the function and sum (in that order) of the hash stored
+// in this Verification, or an error if there is an issue during parsing.
+func (v Verification) HashParts() (string, string, error) {
+	if v.Hash == nil {
+		// The hash can be nil
+		return "", "", nil
+	}
+	parts := strings.SplitN(*v.Hash, "-", 2)
+	if len(parts) != 2 {
+		return "", "", errors.ErrHashMalformed
+	}
+
+	return parts[0], parts[1], nil
+}
+
+func (v Verification) Validate(c path.ContextPath) (r report.Report) {
+	c = c.Append("hash")
+	if v.Hash == nil {
+		// The hash can be nil
+		return
+	}
+
+	function, sum, err := v.HashParts()
+	if err != nil {
+		r.AddOnError(c, err)
+		return
+	}
+	var hash crypto.Hash
+	switch function {
+	case "sha512":
+		hash = crypto.SHA512
+	case "sha256":
+		hash = crypto.SHA256
+	default:
+		r.AddOnError(c, errors.ErrHashUnrecognized)
+		return
+	}
+
+	if len(sum) != hex.EncodedLen(hash.Size()) {
+		r.AddOnError(c, errors.ErrHashWrongSize)
+	}
+
+	return
+}
diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/config.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/config.go
new file mode 100644
index 000000000..e83abb0bb
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/config.go
@@ -0,0 +1,78 @@
+// Copyright 2020 Red Hat, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
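[Editor's illustration, not part of the vendored patch: a Verification.Hash in the code above is a single string of the form "<function>-<hexsum>", e.g. a "sha512-" prefix followed by 128 hex characters. A minimal standalone sketch of the same length check; the checkHash helper name is ours.]

package main

import (
	"crypto"
	"encoding/hex"
	"fmt"
	"strings"
)

// checkHash mirrors Verification.Validate above: split "<function>-<hexsum>",
// map the function name to a crypto.Hash, and confirm the hex sum has the
// expected encoded length for that digest.
func checkHash(s string) error {
	parts := strings.SplitN(s, "-", 2)
	if len(parts) != 2 {
		return fmt.Errorf("malformed hash %q", s)
	}
	function, sum := parts[0], parts[1]
	var h crypto.Hash
	switch function {
	case "sha512":
		h = crypto.SHA512
	case "sha256":
		h = crypto.SHA256
	default:
		return fmt.Errorf("unrecognized hash function %q", function)
	}
	// crypto.Hash.Size reports the digest length in bytes; hex doubles it.
	if len(sum) != hex.EncodedLen(h.Size()) {
		return fmt.Errorf("wrong sum length for %s", function)
	}
	return nil
}

func main() {
	fmt.Println(checkHash("sha512-" + strings.Repeat("ab", 64))) // <nil>
	fmt.Println(checkHash("md5-abcdef"))                         // unrecognized hash function
}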
+
+package v3_4
+
+import (
+	"github.com/coreos/ignition/v2/config/merge"
+	"github.com/coreos/ignition/v2/config/shared/errors"
+	"github.com/coreos/ignition/v2/config/util"
+	prev "github.com/coreos/ignition/v2/config/v3_3"
+	"github.com/coreos/ignition/v2/config/v3_4/translate"
+	"github.com/coreos/ignition/v2/config/v3_4/types"
+	"github.com/coreos/ignition/v2/config/validate"
+
+	"github.com/coreos/go-semver/semver"
+	"github.com/coreos/vcontext/report"
+)
+
+func Merge(parent, child types.Config) types.Config {
+	res, _ := merge.MergeStructTranscribe(parent, child)
+	return res.(types.Config)
+}
+
+// Parse parses the raw config into a types.Config struct and generates a report of any
+// errors, warnings, info, and deprecations it encountered
+func Parse(rawConfig []byte) (types.Config, report.Report, error) {
+	if len(rawConfig) == 0 {
+		return types.Config{}, report.Report{}, errors.ErrEmpty
+	}
+
+	var config types.Config
+	if rpt, err := util.HandleParseErrors(rawConfig, &config); err != nil {
+		return types.Config{}, rpt, err
+	}
+
+	version, err := semver.NewVersion(config.Ignition.Version)
+
+	if err != nil || *version != types.MaxVersion {
+		return types.Config{}, report.Report{}, errors.ErrUnknownVersion
+	}
+
+	rpt := validate.ValidateWithContext(config, rawConfig)
+	if rpt.IsFatal() {
+		return types.Config{}, rpt, errors.ErrInvalid
+	}
+
+	return config, rpt, nil
+}
+
+// ParseCompatibleVersion parses the raw config of version 3.4.0 or
+// earlier into a 3.4 types.Config struct and generates a report of any errors,
+// warnings, info, and deprecations it encountered
+func ParseCompatibleVersion(raw []byte) (types.Config, report.Report, error) {
+	version, rpt, err := util.GetConfigVersion(raw)
+	if err != nil {
+		return types.Config{}, rpt, err
+	}
+
+	if version == types.MaxVersion {
+		return Parse(raw)
+	}
+	prevCfg, r, err := prev.ParseCompatibleVersion(raw)
+	if err != nil {
+		return types.Config{}, r, err
+	}
+	return translate.Translate(prevCfg), r, nil
+}
diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/translate/translate.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/translate/translate.go
new file mode 100644
index 000000000..5d748d829
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/translate/translate.go
@@ -0,0 +1,85 @@
+// Copyright 2020 Red Hat, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
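[Editor's illustration, not part of the vendored patch: a hedged usage sketch of the two entry points defined in config.go above. Parse accepts only exactly spec 3.4.0, while ParseCompatibleVersion also takes older configs and translates them forward; the JSON literal below is ours.]

package main

import (
	"fmt"

	v3_4 "github.com/coreos/ignition/v2/config/v3_4"
)

func main() {
	raw := []byte(`{"ignition": {"version": "3.3.0"}}`)

	// Parse rejects anything that is not exactly spec 3.4.0.
	if _, _, err := v3_4.Parse(raw); err != nil {
		fmt.Println("Parse:", err) // reports an unsupported version
	}

	// ParseCompatibleVersion parses the 3.3.0 config and translates it to 3.4,
	// so the resulting config reports version 3.4.0.
	cfg, rpt, err := v3_4.ParseCompatibleVersion(raw)
	fmt.Println(cfg.Ignition.Version, rpt.IsFatal(), err) // 3.4.0 false <nil>
}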
+ +package translate + +import ( + "github.com/coreos/ignition/v2/config/translate" + "github.com/coreos/ignition/v2/config/util" + old_types "github.com/coreos/ignition/v2/config/v3_3/types" + "github.com/coreos/ignition/v2/config/v3_4/types" +) + +func translateIgnition(old old_types.Ignition) (ret types.Ignition) { + // use a new translator so we don't recurse infinitely + translate.NewTranslator().Translate(&old, &ret) + ret.Version = types.MaxVersion.String() + return +} + +func translateFileEmbedded1(old old_types.FileEmbedded1) (ret types.FileEmbedded1) { + tr := translate.NewTranslator() + tr.Translate(&old.Append, &ret.Append) + tr.Translate(&old.Contents, &ret.Contents) + if old.Mode != nil { + // We support the special mode bits for specs >=3.4.0, so if + // the user provides special mode bits in an Ignition config + // with the version < 3.4.0, then we need to explicitly mask + // those bits out during translation. + ret.Mode = util.IntToPtr(*old.Mode & ^07000) + } + return +} + +func translateDirectoryEmbedded1(old old_types.DirectoryEmbedded1) (ret types.DirectoryEmbedded1) { + if old.Mode != nil { + // We support the special mode bits for specs >=3.4.0, so if + // the user provides special mode bits in an Ignition config + // with the version < 3.4.0, then we need to explicitly mask + // those bits out during translation. + ret.Mode = util.IntToPtr(*old.Mode & ^07000) + } + return +} + +func translateLuks(old old_types.Luks) (ret types.Luks) { + tr := translate.NewTranslator() + tr.AddCustomTranslator(translateTang) + tr.Translate(&old.Clevis, &ret.Clevis) + tr.Translate(&old.Device, &ret.Device) + tr.Translate(&old.KeyFile, &ret.KeyFile) + tr.Translate(&old.Label, &ret.Label) + tr.Translate(&old.Name, &ret.Name) + tr.Translate(&old.Options, &ret.Options) + tr.Translate(&old.UUID, &ret.UUID) + tr.Translate(&old.WipeVolume, &ret.WipeVolume) + return +} + +func translateTang(old old_types.Tang) (ret types.Tang) { + tr := translate.NewTranslator() + tr.Translate(&old.Thumbprint, &ret.Thumbprint) + tr.Translate(&old.URL, &ret.URL) + return +} + +func Translate(old old_types.Config) (ret types.Config) { + tr := translate.NewTranslator() + tr.AddCustomTranslator(translateIgnition) + tr.AddCustomTranslator(translateDirectoryEmbedded1) + tr.AddCustomTranslator(translateFileEmbedded1) + tr.AddCustomTranslator(translateLuks) + tr.Translate(&old, &ret) + return +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/types/clevis.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/clevis.go new file mode 100644 index 000000000..68887d434 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/clevis.go @@ -0,0 +1,49 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
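[Editor's illustration, not part of the vendored patch: the `& ^07000` mask used in translateFileEmbedded1 and translateDirectoryEmbedded1 above drops the setuid (04000), setgid (02000), and sticky (01000) bits while keeping the rwx permission bits. A small arithmetic sketch.]

package main

import "fmt"

func main() {
	// 01777 is a sticky, world-writable directory mode (like /tmp).
	old := 01777

	// In Go, ^07000 is the bitwise complement of 07000, so AND-ing with it
	// clears exactly the three special-mode bits and nothing else.
	masked := old & ^07000

	fmt.Printf("%#o -> %#o\n", old, masked) // 01777 -> 0777
}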
+ +package types + +import ( + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/util" + + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (c Clevis) IsPresent() bool { + return util.NotEmpty(c.Custom.Pin) || + len(c.Tang) > 0 || + util.IsTrue(c.Tpm2) || + c.Threshold != nil && *c.Threshold != 0 +} + +func (cu ClevisCustom) Validate(c path.ContextPath) (r report.Report) { + if util.NilOrEmpty(cu.Pin) && util.NilOrEmpty(cu.Config) && !util.IsTrue(cu.NeedsNetwork) { + return + } + if util.NotEmpty(cu.Pin) { + switch *cu.Pin { + case "tpm2", "tang", "sss": + default: + r.AddOnError(c.Append("pin"), errors.ErrUnknownClevisPin) + } + } else { + r.AddOnError(c.Append("pin"), errors.ErrClevisPinRequired) + } + if util.NilOrEmpty(cu.Config) { + r.AddOnError(c.Append("config"), errors.ErrClevisConfigRequired) + } + return +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/types/config.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/config.go new file mode 100644 index 000000000..4f51f3c6c --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/config.go @@ -0,0 +1,64 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/util" + + "github.com/coreos/go-semver/semver" + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +var ( + MaxVersion = semver.Version{ + Major: 3, + Minor: 4, + } +) + +func (cfg Config) Validate(c path.ContextPath) (r report.Report) { + systemdPath := "/etc/systemd/system/" + unitPaths := map[string]struct{}{} + for _, unit := range cfg.Systemd.Units { + if !util.NilOrEmpty(unit.Contents) { + pathString := systemdPath + unit.Name + unitPaths[pathString] = struct{}{} + } + for _, dropin := range unit.Dropins { + if !util.NilOrEmpty(dropin.Contents) { + pathString := systemdPath + unit.Name + ".d/" + dropin.Name + unitPaths[pathString] = struct{}{} + } + } + } + for i, f := range cfg.Storage.Files { + if _, exists := unitPaths[f.Path]; exists { + r.AddOnError(c.Append("storage", "files", i, "path"), errors.ErrPathConflictsSystemd) + } + } + for i, d := range cfg.Storage.Directories { + if _, exists := unitPaths[d.Path]; exists { + r.AddOnError(c.Append("storage", "directories", i, "path"), errors.ErrPathConflictsSystemd) + } + } + for i, l := range cfg.Storage.Links { + if _, exists := unitPaths[l.Path]; exists { + r.AddOnError(c.Append("storage", "links", i, "path"), errors.ErrPathConflictsSystemd) + } + } + return +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/types/device.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/device.go new file mode 100644 index 000000000..a10ce97b0 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/device.go @@ -0,0 +1,25 @@ +// Copyright 2020 Red Hat, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (d Device) Validate(c path.ContextPath) (r report.Report) { + r.AddOnError(c, validatePath(string(d))) + return +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/types/directory.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/directory.go new file mode 100644 index 000000000..f6f068455 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/directory.go @@ -0,0 +1,26 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (d Directory) Validate(c path.ContextPath) (r report.Report) { + r.Merge(d.Node.Validate(c)) + r.AddOnError(c.Append("mode"), validateMode(d.Mode)) + return +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/types/disk.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/disk.go new file mode 100644 index 000000000..8caf8499d --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/disk.go @@ -0,0 +1,135 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package types
+
+import (
+	"github.com/coreos/ignition/v2/config/shared/errors"
+	"github.com/coreos/ignition/v2/config/util"
+
+	"github.com/coreos/vcontext/path"
+	"github.com/coreos/vcontext/report"
+)
+
+func (d Disk) Key() string {
+	return d.Device
+}
+
+func (n Disk) Validate(c path.ContextPath) (r report.Report) {
+	if len(n.Device) == 0 {
+		r.AddOnError(c.Append("device"), errors.ErrDiskDeviceRequired)
+		return
+	}
+	r.AddOnError(c.Append("device"), validatePath(n.Device))
+
+	if collides, p := n.partitionNumbersCollide(); collides {
+		r.AddOnError(c.Append("partitions", p), errors.ErrPartitionNumbersCollide)
+	}
+	if overlaps, p := n.partitionsOverlap(); overlaps {
+		r.AddOnError(c.Append("partitions", p), errors.ErrPartitionsOverlap)
+	}
+	if n.partitionsMixZeroesAndNonexistence() {
+		r.AddOnError(c.Append("partitions"), errors.ErrZeroesWithShouldNotExist)
+	}
+	if collides, p := n.partitionLabelsCollide(); collides {
+		r.AddOnError(c.Append("partitions", p), errors.ErrDuplicateLabels)
+	}
+	return
+}
+
+// partitionNumbersCollide returns true if partition numbers in n.Partitions are not unique. It also returns the
+// index of the colliding partition
+func (n Disk) partitionNumbersCollide() (bool, int) {
+	m := map[int][]int{} // from partition number to index into array
+	for i, p := range n.Partitions {
+		if p.Number != 0 {
+			// a number of 0 means next available number, multiple partitions can specify this
+			m[p.Number] = append(m[p.Number], i)
+		}
+	}
+	for _, n := range m {
+		if len(n) > 1 {
+			// TODO(vc): return information describing the collision for logging
+			return true, n[1]
+		}
+	}
+	return false, 0
+}
+
+func (d Disk) partitionLabelsCollide() (bool, int) {
+	m := map[string]struct{}{}
+	for i, p := range d.Partitions {
+		if p.Label != nil {
+			// record each explicit label; a label that was already seen is a collision
+			if _, exists := m[*p.Label]; exists {
+				return true, i
+			}
+			m[*p.Label] = struct{}{}
+		}
+	}
+	return false, 0
+}
+
+// end returns the last MiB of a partition. Only used by partitionsOverlap. Requires non-nil StartMiB and SizeMiB.
+func (p Partition) end() int {
+	if *p.SizeMiB == 0 {
+		// a size of 0 means "fill available", just return the start as the end for those.
+		return *p.StartMiB
+	}
+	return *p.StartMiB + *p.SizeMiB - 1
+}
+
+// partitionsOverlap returns true if any explicitly dimensioned partitions overlap. It also returns the index of
+// the overlapping partition
+func (n Disk) partitionsOverlap() (bool, int) {
+	for _, p := range n.Partitions {
+		// Starts of 0 are placed by sgdisk into the "largest available block" at that time.
+		// We aren't going to check those for overlap since we don't have the disk geometry.
+		if p.StartMiB == nil || p.SizeMiB == nil || *p.StartMiB == 0 {
+			continue
+		}
+
+		for i, o := range n.Partitions {
+			if o.StartMiB == nil || o.SizeMiB == nil || p == o || *o.StartMiB == 0 {
+				continue
+			}
+
+			// is p.StartMiB within o?
+			if *p.StartMiB >= *o.StartMiB && *p.StartMiB <= o.end() {
+				return true, i
+			}
+
+			// is p.end() within o?
+			if p.end() >= *o.StartMiB && p.end() <= o.end() {
+				return true, i
+			}
+
+			// do p.StartMiB and p.end() straddle o?
+ if *p.StartMiB < *o.StartMiB && p.end() > o.end() { + return true, i + } + } + } + return false, 0 +} + +func (n Disk) partitionsMixZeroesAndNonexistence() bool { + hasZero := false + hasShouldNotExist := false + for _, p := range n.Partitions { + hasShouldNotExist = hasShouldNotExist || util.IsFalse(p.ShouldExist) + hasZero = hasZero || (p.Number == 0) + } + return hasZero && hasShouldNotExist +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/types/file.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/file.go new file mode 100644 index 000000000..9b71bb26a --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/file.go @@ -0,0 +1,43 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/util" + + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (f File) Validate(c path.ContextPath) (r report.Report) { + r.Merge(f.Node.Validate(c)) + r.AddOnError(c.Append("mode"), validateMode(f.Mode)) + r.AddOnError(c.Append("overwrite"), f.validateOverwrite()) + return +} + +func (f File) validateOverwrite() error { + if util.IsTrue(f.Overwrite) && f.Contents.Source == nil { + return errors.ErrOverwriteAndNilSource + } + return nil +} + +func (f FileEmbedded1) IgnoreDuplicates() map[string]struct{} { + return map[string]struct{}{ + "Append": {}, + } +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/types/filesystem.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/filesystem.go new file mode 100644 index 000000000..c722b3633 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/filesystem.go @@ -0,0 +1,106 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
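[Editor's illustration, not part of the vendored patch: in the partition interval math above, end() treats a size of 0 as "fill available" and otherwise returns start + size - 1, so the three overlap checks reduce to a standard closed-interval intersection test. A hedged sketch on plain ints; helper names are ours.]

package main

import "fmt"

// end mirrors Partition.end above: size 0 means "fill available", so the
// interval collapses to its starting MiB.
func end(start, size int) int {
	if size == 0 {
		return start
	}
	return start + size - 1
}

// overlaps reports whether the closed intervals [aStart, end(a)] and
// [bStart, end(b)] intersect, which is what the three checks above test.
func overlaps(aStart, aSize, bStart, bSize int) bool {
	return aStart <= end(bStart, bSize) && bStart <= end(aStart, aSize)
}

func main() {
	fmt.Println(overlaps(1, 100, 50, 100)) // true: 1-100 intersects 50-149
	fmt.Println(overlaps(1, 100, 101, 50)) // false: 1-100 and 101-150 are adjacent
}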
+ +package types + +import ( + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/util" + + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (f Filesystem) Key() string { + return f.Device +} + +func (f Filesystem) IgnoreDuplicates() map[string]struct{} { + return map[string]struct{}{ + "Options": {}, + "MountOptions": {}, + } +} + +func (f Filesystem) Validate(c path.ContextPath) (r report.Report) { + r.AddOnError(c.Append("path"), f.validatePath()) + r.AddOnError(c.Append("device"), validatePath(f.Device)) + r.AddOnError(c.Append("format"), f.validateFormat()) + r.AddOnError(c.Append("label"), f.validateLabel()) + return +} + +func (f Filesystem) validatePath() error { + return validatePathNilOK(f.Path) +} + +func (f Filesystem) validateFormat() error { + if util.NilOrEmpty(f.Format) { + if util.NotEmpty(f.Path) || + util.NotEmpty(f.Label) || + util.NotEmpty(f.UUID) || + util.IsTrue(f.WipeFilesystem) || + len(f.MountOptions) != 0 || + len(f.Options) != 0 { + return errors.ErrFormatNilWithOthers + } + } else { + switch *f.Format { + case "ext4", "btrfs", "xfs", "swap", "vfat", "none": + default: + return errors.ErrFilesystemInvalidFormat + } + } + return nil +} + +func (f Filesystem) validateLabel() error { + if util.NilOrEmpty(f.Label) { + return nil + } + if util.NilOrEmpty(f.Format) { + return errors.ErrLabelNeedsFormat + } + + switch *f.Format { + case "ext4": + if len(*f.Label) > 16 { + // source: man mkfs.ext4 + return errors.ErrExt4LabelTooLong + } + case "btrfs": + if len(*f.Label) > 256 { + // source: man mkfs.btrfs + return errors.ErrBtrfsLabelTooLong + } + case "xfs": + if len(*f.Label) > 12 { + // source: man mkfs.xfs + return errors.ErrXfsLabelTooLong + } + case "swap": + // mkswap's man page does not state a limit on label size, but through + // experimentation it appears that mkswap will truncate long labels to + // 15 characters, so let's enforce that. + if len(*f.Label) > 15 { + return errors.ErrSwapLabelTooLong + } + case "vfat": + if len(*f.Label) > 11 { + // source: man mkfs.fat + return errors.ErrVfatLabelTooLong + } + } + return nil +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/types/headers.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/headers.go new file mode 100644 index 000000000..be1aadad9 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/headers.go @@ -0,0 +1,65 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package types + +import ( + "net/http" + + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +// Parse generates standard net/http headers from the data in HTTPHeaders +func (hs HTTPHeaders) Parse() (http.Header, error) { + headers := http.Header{} + for _, header := range hs { + if header.Name == "" { + return nil, errors.ErrEmptyHTTPHeaderName + } + if header.Value == nil || string(*header.Value) == "" { + return nil, errors.ErrInvalidHTTPHeader + } + headers.Add(header.Name, string(*header.Value)) + } + return headers, nil +} + +func (h HTTPHeader) Validate(c path.ContextPath) (r report.Report) { + r.AddOnError(c.Append("name"), h.validateName()) + r.AddOnError(c.Append("value"), h.validateValue()) + return +} + +func (h HTTPHeader) validateName() error { + if h.Name == "" { + return errors.ErrEmptyHTTPHeaderName + } + return nil +} + +func (h HTTPHeader) validateValue() error { + if h.Value == nil { + return nil + } + if string(*h.Value) == "" { + return errors.ErrInvalidHTTPHeader + } + return nil +} + +func (h HTTPHeader) Key() string { + return h.Name +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/types/ignition.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/ignition.go new file mode 100644 index 000000000..190445bda --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/ignition.go @@ -0,0 +1,49 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "github.com/coreos/go-semver/semver" + + "github.com/coreos/ignition/v2/config/shared/errors" + + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (v Ignition) Semver() (*semver.Version, error) { + return semver.NewVersion(v.Version) +} + +func (ic IgnitionConfig) Validate(c path.ContextPath) (r report.Report) { + for i, res := range ic.Merge { + r.AddOnError(c.Append("merge", i), res.validateRequiredSource()) + } + return +} + +func (v Ignition) Validate(c path.ContextPath) (r report.Report) { + c = c.Append("version") + tv, err := v.Semver() + if err != nil { + r.AddOnError(c, errors.ErrInvalidVersion) + return + } + + if MaxVersion != *tv { + r.AddOnError(c, errors.ErrUnknownVersion) + } + return +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/types/kargs.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/kargs.go new file mode 100644 index 000000000..42c29408e --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/kargs.go @@ -0,0 +1,22 @@ +// Copyright 2021 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +func (k KernelArguments) MergedKeys() map[string]string { + return map[string]string{ + "ShouldExist": "KernelArgument", + "ShouldNotExist": "KernelArgument", + } +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/types/luks.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/luks.go new file mode 100644 index 000000000..90d723870 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/luks.go @@ -0,0 +1,71 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "strings" + + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/util" + + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (l Luks) Key() string { + return l.Name +} + +func (l Luks) IgnoreDuplicates() map[string]struct{} { + return map[string]struct{}{ + "Options": {}, + } +} + +func (l Luks) Validate(c path.ContextPath) (r report.Report) { + if strings.Contains(l.Name, "/") { + r.AddOnError(c.Append("name"), errors.ErrLuksNameContainsSlash) + } + r.AddOnError(c.Append("label"), l.validateLabel()) + if util.NilOrEmpty(l.Device) { + r.AddOnError(c.Append("device"), errors.ErrDiskDeviceRequired) + } else { + r.AddOnError(c.Append("device"), validatePath(*l.Device)) + } + + if util.NotEmpty(l.Clevis.Custom.Pin) && (len(l.Clevis.Tang) > 0 || util.IsTrue(l.Clevis.Tpm2) || (l.Clevis.Threshold != nil && *l.Clevis.Threshold != 0)) { + r.AddOnError(c.Append("clevis"), errors.ErrClevisCustomWithOthers) + } + + // fail if a key file is provided and is not valid + if err := validateURLNilOK(l.KeyFile.Source); err != nil { + r.AddOnError(c.Append("keys"), errors.ErrInvalidLuksKeyFile) + } + return +} + +func (l Luks) validateLabel() error { + if util.NilOrEmpty(l.Label) { + return nil + } + + if len(*l.Label) > 47 { + // LUKS2_LABEL_L has a maximum length of 48 (including the null terminator) + // https://gitlab.com/cryptsetup/cryptsetup/-/blob/1633f030e89ad2f11ae649ba9600997a41abd3fc/lib/luks2/luks2.h#L86 + return errors.ErrLuksLabelTooLong + } + + return nil +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/types/mode.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/mode.go new file mode 100644 index 000000000..9eb7573d8 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/mode.go @@ -0,0 +1,26 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "github.com/coreos/ignition/v2/config/shared/errors" +) + +func validateMode(m *int) error { + if m != nil && (*m < 0 || *m > 07777) { + return errors.ErrFileIllegalMode + } + return nil +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/types/node.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/node.go new file mode 100644 index 000000000..248276e73 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/node.go @@ -0,0 +1,59 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "path" + + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/util" + + vpath "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (n Node) Key() string { + return n.Path +} + +func (n Node) Validate(c vpath.ContextPath) (r report.Report) { + r.AddOnError(c.Append("path"), validatePath(n.Path)) + return +} + +func (n Node) Depth() int { + count := 0 + for p := path.Clean(string(n.Path)); p != "/"; count++ { + p = path.Dir(p) + } + return count +} + +func validateIDorName(id *int, name *string) error { + if id != nil && util.NotEmpty(name) { + return errors.ErrBothIDAndNameSet + } + return nil +} + +func (nu NodeUser) Validate(c vpath.ContextPath) (r report.Report) { + r.AddOnError(c, validateIDorName(nu.ID, nu.Name)) + return +} + +func (ng NodeGroup) Validate(c vpath.ContextPath) (r report.Report) { + r.AddOnError(c, validateIDorName(ng.ID, ng.Name)) + return +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/types/partition.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/partition.go new file mode 100644 index 000000000..1b2d97edf --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/partition.go @@ -0,0 +1,91 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package types
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/coreos/ignition/v2/config/shared/errors"
+	"github.com/coreos/ignition/v2/config/util"
+
+	"github.com/coreos/vcontext/path"
+	"github.com/coreos/vcontext/report"
+)
+
+const (
+	guidRegexStr = "^(|[[:xdigit:]]{8}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{12})$"
+)
+
+var (
+	guidRegex = regexp.MustCompile(guidRegexStr)
+)
+
+func (p Partition) Key() string {
+	if p.Number != 0 {
+		return fmt.Sprintf("number:%d", p.Number)
+	} else if p.Label != nil {
+		return fmt.Sprintf("label:%s", *p.Label)
+	} else {
+		return ""
+	}
+}
+
+func (p Partition) Validate(c path.ContextPath) (r report.Report) {
+	if util.IsFalse(p.ShouldExist) &&
+		(p.Label != nil || util.NotEmpty(p.TypeGUID) || util.NotEmpty(p.GUID) || p.StartMiB != nil || p.SizeMiB != nil) {
+		r.AddOnError(c, errors.ErrShouldNotExistWithOthers)
+	}
+	if p.Number == 0 && p.Label == nil {
+		r.AddOnError(c, errors.ErrNeedLabelOrNumber)
+	}
+
+	r.AddOnError(c.Append("label"), p.validateLabel())
+	r.AddOnError(c.Append("guid"), validateGUID(p.GUID))
+	r.AddOnError(c.Append("typeGuid"), validateGUID(p.TypeGUID))
+	return
+}
+
+func (p Partition) validateLabel() error {
+	if p.Label == nil {
+		return nil
+	}
+	// http://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_entries:
+	// 56 (0x38) 72 bytes Partition name (36 UTF-16LE code units)
+
+	// XXX(vc): note GPT calls it a name, we're using label for consistency
+	// with udev naming /dev/disk/by-partlabel/*.
+	if len(*p.Label) > 36 {
+		return errors.ErrLabelTooLong
+	}
+
+	// sgdisk uses colons for delimiting compound arguments and does not allow escaping them.
+	if strings.Contains(*p.Label, ":") {
+		return errors.ErrLabelContainsColon
+	}
+	return nil
+}
+
+func validateGUID(guidPointer *string) error {
+	if guidPointer == nil {
+		return nil
+	}
+	guid := *guidPointer
+	if ok := guidRegex.MatchString(guid); !ok {
+		return errors.ErrDoesntMatchGUIDRegex
+	}
+	return nil
+}
diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/types/passwd.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/passwd.go
new file mode 100644
index 000000000..4060a2a6f
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/passwd.go
@@ -0,0 +1,23 @@
+// Copyright 2020 Red Hat, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+func (p PasswdUser) Key() string {
+	return p.Name
+}
+
+func (g PasswdGroup) Key() string {
+	return g.Name
+}
diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/types/path.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/path.go
new file mode 100644
index 000000000..131e300c1
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/path.go
@@ -0,0 +1,42 @@
+// Copyright 2020 Red Hat, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "path" + + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/util" +) + +func validatePath(p string) error { + if p == "" { + return errors.ErrNoPath + } + if !path.IsAbs(p) { + return errors.ErrPathRelative + } + if path.Clean(p) != p { + return errors.ErrDirtyPath + } + return nil +} + +func validatePathNilOK(p *string) error { + if util.NilOrEmpty(p) { + return nil + } + return validatePath(*p) +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/types/proxy.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/proxy.go new file mode 100644 index 000000000..d48d210a0 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/proxy.go @@ -0,0 +1,49 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "net/url" + + "github.com/coreos/ignition/v2/config/shared/errors" + + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (p Proxy) Validate(c path.ContextPath) (r report.Report) { + validateProxyURL(p.HTTPProxy, c.Append("httpProxy"), &r, true) + validateProxyURL(p.HTTPSProxy, c.Append("httpsProxy"), &r, false) + return +} + +func validateProxyURL(s *string, p path.ContextPath, r *report.Report, httpOk bool) { + if s == nil { + return + } + u, err := url.Parse(*s) + if err != nil { + r.AddOnError(p, errors.ErrInvalidUrl) + return + } + + if u.Scheme != "https" && u.Scheme != "http" { + r.AddOnError(p, errors.ErrInvalidProxy) + return + } + if u.Scheme == "http" && !httpOk { + r.AddOnWarn(p, errors.ErrInsecureProxy) + } +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/types/raid.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/raid.go new file mode 100644 index 000000000..9d69aa366 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/raid.go @@ -0,0 +1,62 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
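[Editor's illustration, not part of the vendored patch: a quick sketch of what validatePath above accepts. Paths must be non-empty, absolute, and already clean in the path.Clean sense; the check helper name is ours.]

package main

import (
	"fmt"
	"path"
)

// check mirrors validatePath above using only the standard library.
func check(p string) string {
	switch {
	case p == "":
		return "no path"
	case !path.IsAbs(p):
		return "relative path"
	case path.Clean(p) != p:
		return "path not clean"
	default:
		return "ok"
	}
}

func main() {
	fmt.Println(check("/etc/hosts"))  // ok
	fmt.Println(check("etc/hosts"))   // relative path
	fmt.Println(check("/etc//hosts")) // path not clean
	fmt.Println(check("/etc/hosts/")) // path not clean (trailing slash)
}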
+ +package types + +import ( + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/util" + + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (r Raid) Key() string { + return r.Name +} + +func (r Raid) IgnoreDuplicates() map[string]struct{} { + return map[string]struct{}{ + "Options": {}, + } +} + +func (ra Raid) Validate(c path.ContextPath) (r report.Report) { + r.AddOnError(c.Append("level"), ra.validateLevel()) + if len(ra.Devices) == 0 { + r.AddOnError(c.Append("devices"), errors.ErrRaidDevicesRequired) + } + return +} + +func (r Raid) validateLevel() error { + if util.NilOrEmpty(r.Level) { + return errors.ErrRaidLevelRequired + } + switch *r.Level { + case "linear", "raid0", "0", "stripe": + if r.Spares != nil && *r.Spares != 0 { + return errors.ErrSparesUnsupportedForLevel + } + case "raid1", "1", "mirror": + case "raid4", "4": + case "raid5", "5": + case "raid6", "6": + case "raid10", "10": + default: + return errors.ErrUnrecognizedRaidLevel + } + + return nil +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/types/resource.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/resource.go new file mode 100644 index 000000000..68da6c7b7 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/resource.go @@ -0,0 +1,91 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "net/url" + + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/util" + + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (res Resource) Key() string { + if res.Source == nil { + return "" + } + return *res.Source +} + +func (res Resource) Validate(c path.ContextPath) (r report.Report) { + r.AddOnError(c.Append("compression"), res.validateCompression()) + r.AddOnError(c.Append("verification", "hash"), res.validateVerification()) + r.AddOnError(c.Append("source"), validateURLNilOK(res.Source)) + r.AddOnError(c.Append("httpHeaders"), res.validateSchemeForHTTPHeaders()) + return +} + +func (res Resource) validateCompression() error { + if res.Compression != nil { + switch *res.Compression { + case "", "gzip": + default: + return errors.ErrCompressionInvalid + } + } + return nil +} + +func (res Resource) validateVerification() error { + if res.Verification.Hash != nil && res.Source == nil { + return errors.ErrVerificationAndNilSource + } + return nil +} + +func (res Resource) validateSchemeForHTTPHeaders() error { + if len(res.HTTPHeaders) < 1 { + return nil + } + + if util.NilOrEmpty(res.Source) { + return errors.ErrInvalidUrl + } + + u, err := url.Parse(*res.Source) + if err != nil { + return errors.ErrInvalidUrl + } + + switch u.Scheme { + case "http", "https": + return nil + default: + return errors.ErrUnsupportedSchemeForHTTPHeaders + } +} + +// Ensure that the Source is specified and valid. 
This is not called by +// Resource.Validate() because some structs that embed Resource don't +// require Source to be specified. Containing structs that require Source +// should call this function from their Validate(). +func (res Resource) validateRequiredSource() error { + if util.NilOrEmpty(res.Source) { + return errors.ErrSourceRequired + } + return validateURL(*res.Source) +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/types/schema.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/schema.go new file mode 100644 index 000000000..74a9e1225 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/schema.go @@ -0,0 +1,259 @@ +package types + +// generated by "schematyper --package=types config/v3_4/schema/ignition.json -o config/v3_4/types/schema.go --root-type=Config" -- DO NOT EDIT + +type Clevis struct { + Custom ClevisCustom `json:"custom,omitempty"` + Tang []Tang `json:"tang,omitempty"` + Threshold *int `json:"threshold,omitempty"` + Tpm2 *bool `json:"tpm2,omitempty"` +} + +type ClevisCustom struct { + Config *string `json:"config,omitempty"` + NeedsNetwork *bool `json:"needsNetwork,omitempty"` + Pin *string `json:"pin,omitempty"` +} + +type Config struct { + Ignition Ignition `json:"ignition"` + KernelArguments KernelArguments `json:"kernelArguments,omitempty"` + Passwd Passwd `json:"passwd,omitempty"` + Storage Storage `json:"storage,omitempty"` + Systemd Systemd `json:"systemd,omitempty"` +} + +type Device string + +type Directory struct { + Node + DirectoryEmbedded1 +} + +type DirectoryEmbedded1 struct { + Mode *int `json:"mode,omitempty"` +} + +type Disk struct { + Device string `json:"device"` + Partitions []Partition `json:"partitions,omitempty"` + WipeTable *bool `json:"wipeTable,omitempty"` +} + +type Dropin struct { + Contents *string `json:"contents,omitempty"` + Name string `json:"name"` +} + +type File struct { + Node + FileEmbedded1 +} + +type FileEmbedded1 struct { + Append []Resource `json:"append,omitempty"` + Contents Resource `json:"contents,omitempty"` + Mode *int `json:"mode,omitempty"` +} + +type Filesystem struct { + Device string `json:"device"` + Format *string `json:"format,omitempty"` + Label *string `json:"label,omitempty"` + MountOptions []MountOption `json:"mountOptions,omitempty"` + Options []FilesystemOption `json:"options,omitempty"` + Path *string `json:"path,omitempty"` + UUID *string `json:"uuid,omitempty"` + WipeFilesystem *bool `json:"wipeFilesystem,omitempty"` +} + +type FilesystemOption string + +type Group string + +type HTTPHeader struct { + Name string `json:"name"` + Value *string `json:"value,omitempty"` +} + +type HTTPHeaders []HTTPHeader + +type Ignition struct { + Config IgnitionConfig `json:"config,omitempty"` + Proxy Proxy `json:"proxy,omitempty"` + Security Security `json:"security,omitempty"` + Timeouts Timeouts `json:"timeouts,omitempty"` + Version string `json:"version"` +} + +type IgnitionConfig struct { + Merge []Resource `json:"merge,omitempty"` + Replace Resource `json:"replace,omitempty"` +} + +type KernelArgument string + +type KernelArguments struct { + ShouldExist []KernelArgument `json:"shouldExist,omitempty"` + ShouldNotExist []KernelArgument `json:"shouldNotExist,omitempty"` +} + +type Link struct { + Node + LinkEmbedded1 +} + +type LinkEmbedded1 struct { + Hard *bool `json:"hard,omitempty"` + Target *string `json:"target,omitempty"` +} + +type Luks struct { + Clevis Clevis `json:"clevis,omitempty"` + Device *string `json:"device,omitempty"` + Discard *bool 
`json:"discard,omitempty"` + KeyFile Resource `json:"keyFile,omitempty"` + Label *string `json:"label,omitempty"` + Name string `json:"name"` + OpenOptions []OpenOption `json:"openOptions,omitempty"` + Options []LuksOption `json:"options,omitempty"` + UUID *string `json:"uuid,omitempty"` + WipeVolume *bool `json:"wipeVolume,omitempty"` +} + +type LuksOption string + +type MountOption string + +type NoProxyItem string + +type Node struct { + Group NodeGroup `json:"group,omitempty"` + Overwrite *bool `json:"overwrite,omitempty"` + Path string `json:"path"` + User NodeUser `json:"user,omitempty"` +} + +type NodeGroup struct { + ID *int `json:"id,omitempty"` + Name *string `json:"name,omitempty"` +} + +type NodeUser struct { + ID *int `json:"id,omitempty"` + Name *string `json:"name,omitempty"` +} + +type OpenOption string + +type Partition struct { + GUID *string `json:"guid,omitempty"` + Label *string `json:"label,omitempty"` + Number int `json:"number,omitempty"` + Resize *bool `json:"resize,omitempty"` + ShouldExist *bool `json:"shouldExist,omitempty"` + SizeMiB *int `json:"sizeMiB,omitempty"` + StartMiB *int `json:"startMiB,omitempty"` + TypeGUID *string `json:"typeGuid,omitempty"` + WipePartitionEntry *bool `json:"wipePartitionEntry,omitempty"` +} + +type Passwd struct { + Groups []PasswdGroup `json:"groups,omitempty"` + Users []PasswdUser `json:"users,omitempty"` +} + +type PasswdGroup struct { + Gid *int `json:"gid,omitempty"` + Name string `json:"name"` + PasswordHash *string `json:"passwordHash,omitempty"` + ShouldExist *bool `json:"shouldExist,omitempty"` + System *bool `json:"system,omitempty"` +} + +type PasswdUser struct { + Gecos *string `json:"gecos,omitempty"` + Groups []Group `json:"groups,omitempty"` + HomeDir *string `json:"homeDir,omitempty"` + Name string `json:"name"` + NoCreateHome *bool `json:"noCreateHome,omitempty"` + NoLogInit *bool `json:"noLogInit,omitempty"` + NoUserGroup *bool `json:"noUserGroup,omitempty"` + PasswordHash *string `json:"passwordHash,omitempty"` + PrimaryGroup *string `json:"primaryGroup,omitempty"` + SSHAuthorizedKeys []SSHAuthorizedKey `json:"sshAuthorizedKeys,omitempty"` + Shell *string `json:"shell,omitempty"` + ShouldExist *bool `json:"shouldExist,omitempty"` + System *bool `json:"system,omitempty"` + UID *int `json:"uid,omitempty"` +} + +type Proxy struct { + HTTPProxy *string `json:"httpProxy,omitempty"` + HTTPSProxy *string `json:"httpsProxy,omitempty"` + NoProxy []NoProxyItem `json:"noProxy,omitempty"` +} + +type Raid struct { + Devices []Device `json:"devices,omitempty"` + Level *string `json:"level,omitempty"` + Name string `json:"name"` + Options []RaidOption `json:"options,omitempty"` + Spares *int `json:"spares,omitempty"` +} + +type RaidOption string + +type Resource struct { + Compression *string `json:"compression,omitempty"` + HTTPHeaders HTTPHeaders `json:"httpHeaders,omitempty"` + Source *string `json:"source,omitempty"` + Verification Verification `json:"verification,omitempty"` +} + +type SSHAuthorizedKey string + +type Security struct { + TLS TLS `json:"tls,omitempty"` +} + +type Storage struct { + Directories []Directory `json:"directories,omitempty"` + Disks []Disk `json:"disks,omitempty"` + Files []File `json:"files,omitempty"` + Filesystems []Filesystem `json:"filesystems,omitempty"` + Links []Link `json:"links,omitempty"` + Luks []Luks `json:"luks,omitempty"` + Raid []Raid `json:"raid,omitempty"` +} + +type Systemd struct { + Units []Unit `json:"units,omitempty"` +} + +type TLS struct { + CertificateAuthorities 
[]Resource `json:"certificateAuthorities,omitempty"` +} + +type Tang struct { + Advertisement *string `json:"advertisement,omitempty"` + Thumbprint *string `json:"thumbprint,omitempty"` + URL string `json:"url,omitempty"` +} + +type Timeouts struct { + HTTPResponseHeaders *int `json:"httpResponseHeaders,omitempty"` + HTTPTotal *int `json:"httpTotal,omitempty"` +} + +type Unit struct { + Contents *string `json:"contents,omitempty"` + Dropins []Dropin `json:"dropins,omitempty"` + Enabled *bool `json:"enabled,omitempty"` + Mask *bool `json:"mask,omitempty"` + Name string `json:"name"` +} + +type Verification struct { + Hash *string `json:"hash,omitempty"` +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/types/storage.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/storage.go new file mode 100644 index 000000000..20cb73048 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/storage.go @@ -0,0 +1,115 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "path" + "strings" + + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/util" + + vpath "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (s Storage) MergedKeys() map[string]string { + return map[string]string{ + "Directories": "Node", + "Files": "Node", + "Links": "Node", + } +} + +func (s Storage) Validate(c vpath.ContextPath) (r report.Report) { + s.validateDirectories(c, &r) + s.validateFiles(c, &r) + s.validateLinks(c, &r) + s.validateFilesystems(c, &r) + return +} + +func (s Storage) validateDirectories(c vpath.ContextPath, r *report.Report) { + for i, d := range s.Directories { + for _, l := range s.Links { + if strings.HasPrefix(d.Path, l.Path+"/") { + r.AddOnError(c.Append("directories", i), errors.ErrDirectoryUsedSymlink) + } + } + } +} + +func (s Storage) validateFiles(c vpath.ContextPath, r *report.Report) { + for i, f := range s.Files { + for _, l := range s.Links { + if strings.HasPrefix(f.Path, l.Path+"/") { + r.AddOnError(c.Append("files", i), errors.ErrFileUsedSymlink) + } + } + } +} + +func (s Storage) validateLinks(c vpath.ContextPath, r *report.Report) { + for i, l1 := range s.Links { + for _, l2 := range s.Links { + if strings.HasPrefix(l1.Path, l2.Path+"/") { + r.AddOnError(c.Append("links", i), errors.ErrLinkUsedSymlink) + } + } + if util.NilOrEmpty(l1.Target) { + r.AddOnError(c.Append("links", i, "target"), errors.ErrLinkTargetRequired) + continue + } + if !util.IsTrue(l1.Hard) { + continue + } + target := path.Clean(*l1.Target) + if !path.IsAbs(target) { + target = path.Join(l1.Path, *l1.Target) + } + for _, d := range s.Directories { + if target == d.Path { + r.AddOnError(c.Append("links", i), errors.ErrHardLinkToDirectory) + } + } + ownerCheck := func(ok bool, path vpath.ContextPath) { + if !ok { + r.AddOnWarn(path, errors.ErrHardLinkSpecifiesOwner) + } + } + ownerCheck(l1.User.ID == nil, c.Append("links", i, "user", "id")) + 
ownerCheck(l1.User.Name == nil, c.Append("links", i, "user", "name")) + ownerCheck(l1.Group.ID == nil, c.Append("links", i, "group", "id")) + ownerCheck(l1.Group.Name == nil, c.Append("links", i, "group", "name")) + } +} + +func (s Storage) validateFilesystems(c vpath.ContextPath, r *report.Report) { + disks := make(map[string]Disk) + for _, d := range s.Disks { + disks[d.Device] = d + } + + for i, f := range s.Filesystems { + disk, exist := disks[f.Device] + if exist { + if len(disk.Partitions) > 0 { + r.AddOnWarn(c.Append("filesystems", i, "device"), errors.ErrPartitionsOverwritten) + } else if !util.IsTrue(f.WipeFilesystem) && util.IsTrue(disk.WipeTable) { + r.AddOnWarn(c.Append("filesystems", i, "device"), errors.ErrFilesystemImplicitWipe) + } + } + } +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/types/systemd.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/systemd.go new file mode 100644 index 000000000..ac521ba73 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/systemd.go @@ -0,0 +1,61 @@ +// Copyright 2022 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "regexp" + + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/shared/parse" + "github.com/coreos/ignition/v2/config/util" + + vpath "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (s Systemd) Validate(c vpath.ContextPath) (r report.Report) { + units := make(map[string]Unit) + checkInstanceUnit := regexp.MustCompile(`^(.+?)@(.+?)\.service$`) + for _, d := range s.Units { + units[d.Name] = d + } + for index, unit := range s.Units { + if checkInstanceUnit.MatchString(unit.Name) && util.IsTrue(unit.Enabled) { + instUnitSlice := checkInstanceUnit.FindSubmatch([]byte(unit.Name)) + instantiableUnit := string(instUnitSlice[1]) + "@.service" + if _, ok := units[instantiableUnit]; ok && util.NotEmpty(units[instantiableUnit].Contents) { + foundInstallSection := false + // we're doing a separate validation pass on each unit to identify + // if an instantiable unit has the install section. So logging an + // `AddOnError` will produce duplicate errors on bad unit contents + // because we're already doing that while validating a unit separately. + opts, err := parse.ParseUnitContents(units[instantiableUnit].Contents) + if err != nil { + continue + } + for _, section := range opts { + if section.Section == "Install" { + foundInstallSection = true + break + } + } + if !foundInstallSection { + r.AddOnWarn(c.Append("units", index, "contents"), errors.NewNoInstallSectionForInstantiableUnitError(instantiableUnit, unit.Name)) + } + } + } + } + return +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/types/tang.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/tang.go new file mode 100644 index 000000000..1839d6cc3 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/tang.go @@ -0,0 +1,65 @@ +// Copyright 2020 Red Hat, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "encoding/json" + "net/url" + + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/util" + + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (t Tang) Key() string { + return t.URL +} + +func (t Tang) Validate(c path.ContextPath) (r report.Report) { + r.AddOnError(c.Append("url"), validateTangURL(t.URL)) + if util.NilOrEmpty(t.Thumbprint) { + r.AddOnError(c.Append("thumbprint"), errors.ErrTangThumbprintRequired) + } + r.AddOnError(c.Append("advertisement"), validateTangAdvertisement(t.Advertisement)) + return +} + +func validateTangURL(s string) error { + u, err := url.Parse(s) + if err != nil { + return errors.ErrInvalidUrl + } + + switch u.Scheme { + case "http", "https": + return nil + default: + return errors.ErrInvalidScheme + } +} + +func validateTangAdvertisement(s *string) error { + if util.NotEmpty(s) { + var adv any + err := json.Unmarshal([]byte(*s), &adv) + if err != nil { + return errors.ErrInvalidTangAdvertisement + } + } + + return nil +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/types/tls.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/tls.go new file mode 100644 index 000000000..8890e397e --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/tls.go @@ -0,0 +1,27 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (tls TLS) Validate(c path.ContextPath) (r report.Report) { + for i, ca := range tls.CertificateAuthorities { + r.AddOnError(c.Append("certificateAuthorities", i), ca.validateRequiredSource()) + } + return +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/types/unit.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/unit.go new file mode 100644 index 000000000..c5ee1e8e3 --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/unit.go @@ -0,0 +1,68 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "path" + + "github.com/coreos/ignition/v2/config/shared/errors" + "github.com/coreos/ignition/v2/config/shared/parse" + "github.com/coreos/ignition/v2/config/shared/validations" + "github.com/coreos/ignition/v2/config/util" + + cpath "github.com/coreos/vcontext/path" + "github.com/coreos/vcontext/report" +) + +func (u Unit) Key() string { + return u.Name +} + +func (d Dropin) Key() string { + return d.Name +} + +func (u Unit) Validate(c cpath.ContextPath) (r report.Report) { + r.AddOnError(c.Append("name"), validateName(u.Name)) + c = c.Append("contents") + opts, err := parse.ParseUnitContents(u.Contents) + r.AddOnError(c, err) + + r.AddOnWarn(c, validations.ValidateInstallSection(u.Name, util.IsTrue(u.Enabled), util.NilOrEmpty(u.Contents), opts)) + + return +} + +func validateName(name string) error { + switch path.Ext(name) { + case ".service", ".socket", ".device", ".mount", ".automount", ".swap", ".target", ".path", ".timer", ".snapshot", ".slice", ".scope": + default: + return errors.ErrInvalidSystemdExt + } + return nil +} + +func (d Dropin) Validate(c cpath.ContextPath) (r report.Report) { + _, err := parse.ParseUnitContents(d.Contents) + r.AddOnError(c.Append("contents"), err) + + switch path.Ext(d.Name) { + case ".conf": + default: + r.AddOnError(c.Append("name"), errors.ErrInvalidSystemdDropinExt) + } + + return +} diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/types/url.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/url.go new file mode 100644 index 000000000..3ca189dae --- /dev/null +++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/url.go @@ -0,0 +1,83 @@ +// Copyright 2020 Red Hat, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package types
+
+import (
+	"net/url"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws/arn"
+	"github.com/vincent-petithory/dataurl"
+
+	"github.com/coreos/ignition/v2/config/shared/errors"
+	"github.com/coreos/ignition/v2/config/util"
+)
+
+func validateURL(s string) error {
+	u, err := url.Parse(s)
+	if err != nil {
+		return errors.ErrInvalidUrl
+	}
+
+	switch u.Scheme {
+	case "http", "https", "tftp", "gs":
+		return nil
+	case "s3":
+		if v, ok := u.Query()["versionId"]; ok {
+			if len(v) == 0 || v[0] == "" {
+				return errors.ErrInvalidS3ObjectVersionId
+			}
+		}
+		return nil
+	case "arn":
+		fullURL := u.Scheme + ":" + u.Opaque
+		if !arn.IsARN(fullURL) {
+			return errors.ErrInvalidS3ARN
+		}
+		s3arn, err := arn.Parse(fullURL)
+		if err != nil {
+			return err
+		}
+		if s3arn.Service != "s3" {
+			return errors.ErrInvalidS3ARN
+		}
+		urlSplit := strings.Split(fullURL, "/")
+		if strings.HasPrefix(s3arn.Resource, "accesspoint/") && len(urlSplit) < 3 {
+			return errors.ErrInvalidS3ARN
+		} else if len(urlSplit) < 2 {
+			return errors.ErrInvalidS3ARN
+		}
+		if v, ok := u.Query()["versionId"]; ok {
+			if len(v) == 0 || v[0] == "" {
+				return errors.ErrInvalidS3ObjectVersionId
+			}
+		}
+		return nil
+	case "data":
+		if _, err := dataurl.DecodeString(s); err != nil {
+			return err
+		}
+		return nil
+	default:
+		return errors.ErrInvalidScheme
+	}
+}
+
+func validateURLNilOK(s *string) error {
+	if util.NilOrEmpty(s) {
+		return nil
+	}
+	return validateURL(*s)
+}
diff --git a/vendor/github.com/coreos/ignition/v2/config/v3_4/types/verification.go b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/verification.go
new file mode 100644
index 000000000..5def6f04b
--- /dev/null
+++ b/vendor/github.com/coreos/ignition/v2/config/v3_4/types/verification.go
@@ -0,0 +1,71 @@
+// Copyright 2020 Red Hat, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"crypto"
+	"encoding/hex"
+	"strings"
+
+	"github.com/coreos/ignition/v2/config/shared/errors"
+
+	"github.com/coreos/vcontext/path"
+	"github.com/coreos/vcontext/report"
+)
+
+// HashParts will return the function and sum (in that order) of the hash stored
+// in this Verification, or an error if there is an issue during parsing.
+func (v Verification) HashParts() (string, string, error) {
+	if v.Hash == nil {
+		// The hash can be nil
+		return "", "", nil
+	}
+	parts := strings.SplitN(*v.Hash, "-", 2)
+	if len(parts) != 2 {
+		return "", "", errors.ErrHashMalformed
+	}
+
+	return parts[0], parts[1], nil
+}
+
+func (v Verification) Validate(c path.ContextPath) (r report.Report) {
+	c = c.Append("hash")
+	if v.Hash == nil {
+		// The hash can be nil
+		return
+	}
+
+	function, sum, err := v.HashParts()
+	if err != nil {
+		r.AddOnError(c, err)
+		return
+	}
+	var hash crypto.Hash
+	switch function {
+	case "sha512":
+		hash = crypto.SHA512
+	case "sha256":
+		hash = crypto.SHA256
+	default:
+		r.AddOnError(c, errors.ErrHashUnrecognized)
+		return
+	}
+
+	if len(sum) != hex.EncodedLen(hash.Size()) {
+		r.AddOnError(c, errors.ErrHashWrongSize)
+	}
+
+	return
+}
diff --git a/vendor/github.com/openshift/api/.ci-operator.yaml b/vendor/github.com/openshift/api/.ci-operator.yaml
new file mode 100644
index 000000000..844f0d26a
--- /dev/null
+++ b/vendor/github.com/openshift/api/.ci-operator.yaml
@@ -0,0 +1,4 @@
+build_root_image:
+  name: release
+  namespace: openshift
+  tag: rhel-8-release-golang-1.20-openshift-4.14
diff --git a/vendor/github.com/openshift/api/.gitattributes b/vendor/github.com/openshift/api/.gitattributes
new file mode 100644
index 000000000..124067fe7
--- /dev/null
+++ b/vendor/github.com/openshift/api/.gitattributes
@@ -0,0 +1,7 @@
+# Set unix LF EOL for shell scripts
+*.sh text eol=lf
+
+**/zz_generated.*.go linguist-generated=true
+**/types.generated.go linguist-generated=true
+**/generated.pb.go linguist-generated=true
+**/generated.proto linguist-generated=true
diff --git a/vendor/github.com/openshift/api/.gitignore b/vendor/github.com/openshift/api/.gitignore
new file mode 100644
index 000000000..760e2df44
--- /dev/null
+++ b/vendor/github.com/openshift/api/.gitignore
@@ -0,0 +1,19 @@
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+.idea/
+_output/
+tests/bin/
+
+models-schema
diff --git a/vendor/github.com/openshift/api/Makefile b/vendor/github.com/openshift/api/Makefile
new file mode 100644
index 000000000..9310a7212
--- /dev/null
+++ b/vendor/github.com/openshift/api/Makefile
@@ -0,0 +1,123 @@
+all: build
+.PHONY: all
+
+update: update-codegen-crds
+
+RUNTIME ?= podman
+RUNTIME_IMAGE_NAME ?= registry.ci.openshift.org/openshift/release:rhel-8-release-golang-1.20-openshift-4.14
+
+EXCLUDE_DIRS := _output/ dependencymagnet/ hack/ third_party/ tls/ tools/ vendor/ tests/
+GO_PACKAGES :=$(addsuffix ...,$(addprefix ./,$(filter-out $(EXCLUDE_DIRS), $(wildcard */))))
+
+.PHONY: test-unit
+test-unit:
+	go test -v $(GO_PACKAGES)
+
+##################################################################################
+#
+# BEGIN: Update codegen-crds. Defaults to generating updates for all API packages.
+# To run a subset of packages:
+# - Filter by group with make update-codegen-crds-<group>
+#   E.g. make update-codegen-crds-machine
+# - Set API_GROUP_VERSIONS to a space separated list of <group>/<version>.
+#   E.g. API_GROUP_VERSIONS="apps/v1 build/v1" make update-codegen-crds.
+# FeatureSet generation is controlled at the group level by the
+# .codegen.yaml file.
+#
+##################################################################################
+
+# Ensure update-scripts are run before crd-gen so updates to Godoc are included in CRDs.
+.PHONY: update-codegen-crds
+update-codegen-crds: update-scripts
+	hack/update-codegen-crds.sh
+
+#####################
+#
+# END: Update Codegen
+#
+#####################
+
+.PHONY: verify-scripts
+verify-scripts:
+	bash -x hack/verify-deepcopy.sh
+	bash -x hack/verify-openapi.sh
+	bash -x hack/verify-protobuf.sh
+	bash -x hack/verify-swagger-docs.sh
+	hack/verify-crds.sh
+	bash -x hack/verify-types.sh
+	bash -x hack/verify-compatibility.sh
+	bash -x hack/verify-integration-tests.sh
+	bash -x hack/verify-group-versions.sh
+	bash -x hack/verify-prerelease-lifecycle-gen.sh
+
+.PHONY: verify
+verify: verify-scripts verify-codegen-crds
+
+.PHONY: verify-codegen-crds
+verify-codegen-crds:
+	bash -x hack/verify-codegen-crds.sh
+
+.PHONY: verify-%
+verify-%:
+	make $*
+	git diff --exit-code
+
+################################################################################################
+#
+# BEGIN: Update scripts. Defaults to generating updates for all API packages.
+# Set API_GROUP_VERSIONS to a space separated list of <group>/<version> to limit
+# the scope of the updates. E.g. API_GROUP_VERSIONS="apps/v1 build/v1" make update-scripts.
+# Note: Protobuf generation is handled separately, see hack/lib/init.sh.
+#
+################################################################################################
+
+.PHONY: update-scripts
+update-scripts: update-compatibility update-openapi update-deepcopy update-protobuf update-swagger-docs tests-vendor update-prerelease-lifecycle-gen
+
+.PHONY: update-compatibility
+update-compatibility:
+	hack/update-compatibility.sh
+
+.PHONY: update-openapi
+update-openapi:
+	hack/update-openapi.sh
+
+.PHONY: update-deepcopy
+update-deepcopy:
+	hack/update-deepcopy.sh
+
+.PHONY: update-protobuf
+update-protobuf:
+	hack/update-protobuf.sh
+
+.PHONY: update-swagger-docs
+update-swagger-docs:
+	hack/update-swagger-docs.sh
+
+.PHONY: update-prerelease-lifecycle-gen
+update-prerelease-lifecycle-gen:
+	hack/update-prerelease-lifecycle-gen.sh
+
+#####################
+#
+# END: Update scripts
+#
+#####################
+
+deps:
+	go mod tidy
+	go mod vendor
+	go mod verify
+
+verify-with-container:
+	$(RUNTIME) run -ti --rm -v $(PWD):/go/src/github.com/openshift/api:z -w /go/src/github.com/openshift/api $(RUNTIME_IMAGE_NAME) make verify
+
+generate-with-container:
+	$(RUNTIME) run -ti --rm -v $(PWD):/go/src/github.com/openshift/api:z -w /go/src/github.com/openshift/api $(RUNTIME_IMAGE_NAME) make update
+
+.PHONY: integration
+integration:
+	make -C tests integration
+
+tests-vendor:
+	make -C tests vendor
diff --git a/vendor/github.com/openshift/api/OWNERS b/vendor/github.com/openshift/api/OWNERS
new file mode 100644
index 000000000..ce5e8dc33
--- /dev/null
+++ b/vendor/github.com/openshift/api/OWNERS
@@ -0,0 +1,29 @@
+reviewers:
+  - adambkaplan
+  - abhinavdahiya
+  - smarterclayton
+  - deads2k
+  - derekwaynecarr
+  - eparis
+  - JoelSpeed
+  - jwforres
+  - knobunc
+  - sjenning
+  - mfojtik
+  - soltysh
+  - sttts
+  - bparees
+approvers:
+  - bparees
+  - deads2k
+  - derekwaynecarr
+  - eparis
+  - JoelSpeed
+  - jwforres
+  - knobunc
+  - mfojtik
+  - sjenning
+  - smarterclayton
+  - soltysh
+  - spadgett
+  - sttts
diff --git a/vendor/github.com/openshift/api/README.md b/vendor/github.com/openshift/api/README.md
new file mode 100644
index 000000000..5ad3880be
--- /dev/null
+++ b/vendor/github.com/openshift/api/README.md
@@ -0,0 +1,88 @@
+# api
+The canonical location of the OpenShift API definition. This repo holds the API type definitions and serialization code used by [openshift/client-go](https://github.com/openshift/client-go).
+
+## defining new APIs
+
+When defining a new API, please follow [the OpenShift API
+conventions](https://github.com/openshift/enhancements/blob/master/CONVENTIONS.md#api),
+and then follow the instructions below to regenerate CRDs (if necessary) and
+submit a pull request with your new API definitions and generated files.
+
+### required labels
+
+In addition to the standard `lgtm` and `approved` labels, this repository requires either:
+
+`bugzilla/valid-bug` - applied if your PR references a valid bugzilla bug
+
+OR
+
+`qe-approved`, `docs-approved`, and `px-approved` - these labels can be applied by anyone in the openshift org via the `/label` command.
+
+Who should apply these qe/docs/px labels?
+- For a no-FF team that is merging a feature before code freeze, the labels need to be applied to their api repo PR by the appropriate teams (i.e. qe, docs, px).
+- For a FF (traditional) team that is merging a feature before FF, they can self-apply the labels (via `/label` commands); the labels are basically irrelevant for those teams.
+- For a FF team that is merging a feature after FF, the PR should be rejected barring an exception.
+
+Why are these labels needed?
+
+We need a way for no-FF teams to be able to merge post-FF that does not require a BZ. For non-shared repos that mechanism is the
+qe/docs/px-approved labels. We are expanding that mechanism to shared repos because the alternative would be that no-FF teams would
+put a dummy `bugzilla/valid-bug` label on their feature PRs in order to be able to merge them after feature freeze. Since most
+individuals can't apply a `bugzilla/valid-bug` label to a PR, this introduces additional obstacles on those PRs. Conversely, anyone
+can apply the docs/qe/px-approved labels, so "FF" teams that need to apply these labels to merge can do so without needing to involve
+anyone additional.
+
+Does this mean feature-freeze teams can use the no-FF process to merge code?
+
+No, signing a team up to be a no-FF team includes some basic education on the process and includes ensuring the associated QE+Docs
+participants are aware the team is moving to that model. If you'd like to sign your team up, please speak with Gina Hargan, who will
+be happy to help on-board your team.
+
+## generating CRD schemas
+
+Since Kubernetes 1.16, every CRD created in `apiextensions.k8s.io/v1` is required to have a [structural OpenAPIV3 schema](https://kubernetes.io/blog/2019/06/20/crd-structural-schema/). The schemas provide server-side validation for fields, as well as providing the descriptions for `oc explain`. Moreover, schemas ensure structural consistency of data in etcd. Without a schema, anything can be stored in a resource, which can have security implications. As we host many of our CRDs in this repo along with their corresponding Go types, we also require them to have schemas. However, the following instructions also apply to CRDs that are not hosted here.
+
+These schemas are often very long and complex, and should not be written by hand. For OpenShift, we provide Makefile targets in [build-machinery-go](https://github.com/openshift/build-machinery-go/) which generate the schema, built on upstream's [controller-gen](https://github.com/kubernetes-sigs/controller-tools) tool.
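+
+As a rough illustration of the generator's input, the sketch below shows a hypothetical `Widget` type (not an API in this repo) annotated with the kind of kubebuilder markers that controller-gen turns into a structural schema:
+
+```go
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +kubebuilder:resource:scope="Cluster"
+// +kubebuilder:subresource:status
+
+// Widget is a hypothetical cluster-scoped resource; the markers on its
+// fields become defaults and validation rules in the generated schema.
+type Widget struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// spec defines the desired state of the widget.
+	Spec WidgetSpec `json:"spec"`
+}
+
+// WidgetSpec demonstrates per-field schema markers.
+type WidgetSpec struct {
+	// replicas is defaulted and bounds-checked server-side by the generated schema.
+	// +kubebuilder:default:=1
+	// +kubebuilder:validation:Minimum=0
+	// +kubebuilder:validation:Maximum=100
+	// +optional
+	Replicas int64 `json:"replicas,omitempty"`
+}
+```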
+
+If you make a change to a CRD type in this repo, simply calling `make update-codegen-crds` should regenerate all CRDs and update the manifests. If yours is not updated, ensure that the path to its API is included in our [calls to the Makefile targets](https://github.com/openshift/api/blob/release-4.5/Makefile#L17-L29).
+
+To add this generator to another repo:
+1. Vendor `github.com/openshift/build-machinery-go`
+
+2. Update your `Makefile` to include the following:
+```
+include $(addprefix ./vendor/github.com/openshift/build-machinery-go/make/, \
+  targets/openshift/crd-schema-gen.mk \
+)
+
+$(call add-crd-gen,<TARGET_NAME>,<API_DIRECTORY>,<CRD_MANIFESTS>,<MANIFEST_OUTPUT>)
+```
+The parameters for the call are:
+
+1. `TARGET_NAME`: The name of your generated Make target. This can be anything, as long as it does not conflict with another make target. Recommended to be your API name.
+2. `API_DIRECTORY`: The location of your API. For example, if your Go types are located under `pkg/apis/myoperator/v1/types.go`, this should be `./pkg/apis/myoperator/v1`.
+3. `CRD_MANIFESTS`: The directory your CRDs are located in. For example, if that is `manifests/my_operator.crd.yaml`, then it should be `./manifests`.
+4. `MANIFEST_OUTPUT`: This should most likely be the same as `CRD_MANIFESTS`, and is only provided for flexibility to output generated code to a different directory.
+
+You can include as many calls to different APIs as necessary, or if you have multiple APIs under the same directory (e.g., `v1` and `v2beta1`) you can use one call to the parent directory pointing to your API.
+
+After this, calling `make update-codegen-crds` should generate a new structural OpenAPIV3 schema for your CRDs.
+
+**Notes**
+- This will not generate entire CRDs, only their OpenAPIV3 schemas. If you do not already have a CRD, you will get no output from the generator.
+- Ensure that your API is correctly declared for the generator to pick it up. That means, in your `doc.go`, include the following (a minimal sketch follows at the end of this README):
+  1. `// +groupName=<group-name>`, this should match the `group` in your CRD `spec`
+  2. `// +kubebuilder:validation:Optional`, this tells the generator that fields should be optional unless explicitly marked with `// +kubebuilder:validation:Required`
+
+For more information on the API markers to add to your Go types, see the [Kubebuilder book](https://book.kubebuilder.io/reference/markers.html).
+
+### Post-schema-generation Patches
+
+Schema generation features might be limited or fall behind what CRD schemas support in the latest Kubernetes version.
+To work around this, there are two patch mechanisms implemented by the `add-crd-gen` target. The basic idea is that you
+place a patch file next to the CRD YAML manifest with either `yaml-merge-patch` or `yaml-patch` as the extension,
+but with the same base name. The `update-codegen-crds` Makefile target will apply these **after** calling
+kubebuilder's controller-gen:
+
+- `yaml-merge-patch`: these are applied via `yq m -x <crd-manifest> <patch-file>`; compare https://mikefarah.gitbook.io/yq/commands/merge#overwrite-values.
+- `yaml-patch`: these are applied via `yaml-patch -o <patch-file> < <crd-manifest>`, using https://github.com/krishicks/yaml-patch.
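+
+To make the `doc.go` notes above concrete, here is a minimal sketch; the group name is hypothetical, chosen purely for illustration:
+
+```go
+// Package v1 declares a hypothetical API group for the schema generator.
+//
+// +groupName=widgets.example.openshift.io
+// +kubebuilder:validation:Optional
+package v1
+```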
diff --git a/vendor/github.com/openshift/api/apiserver/.codegen.yaml b/vendor/github.com/openshift/api/apiserver/.codegen.yaml new file mode 100644 index 000000000..ffa2c8d9b --- /dev/null +++ b/vendor/github.com/openshift/api/apiserver/.codegen.yaml @@ -0,0 +1,2 @@ +swaggerdocs: + commentPolicy: Warn diff --git a/vendor/github.com/openshift/api/apiserver/install.go b/vendor/github.com/openshift/api/apiserver/install.go new file mode 100644 index 000000000..c0cf2ac29 --- /dev/null +++ b/vendor/github.com/openshift/api/apiserver/install.go @@ -0,0 +1,22 @@ +package apiserver + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1 "github.com/openshift/api/apiserver/v1" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(v1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: "apiserver.openshift.io", Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: "apiserver.openshift.io", Kind: kind} +} diff --git a/vendor/github.com/openshift/api/apiserver/v1/Makefile b/vendor/github.com/openshift/api/apiserver/v1/Makefile new file mode 100644 index 000000000..a2d1fa49b --- /dev/null +++ b/vendor/github.com/openshift/api/apiserver/v1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="apiserver.openshift.io/v1" diff --git a/vendor/github.com/openshift/api/apiserver/v1/apiserver.openshift.io_apirequestcount.yaml b/vendor/github.com/openshift/api/apiserver/v1/apiserver.openshift.io_apirequestcount.yaml new file mode 100644 index 000000000..c3c978003 --- /dev/null +++ b/vendor/github.com/openshift/api/apiserver/v1/apiserver.openshift.io_apirequestcount.yaml @@ -0,0 +1,254 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/897 + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: apirequestcounts.apiserver.openshift.io +spec: + group: apiserver.openshift.io + names: + kind: APIRequestCount + listKind: APIRequestCountList + plural: apirequestcounts + singular: apirequestcount + scope: Cluster + versions: + - name: v1 + served: true + storage: true + subresources: + status: {} + additionalPrinterColumns: + - name: RemovedInRelease + type: string + description: Release in which an API will be removed. + jsonPath: .status.removedInRelease + - name: RequestsInCurrentHour + type: integer + description: Number of requests in the current hour. + jsonPath: .status.currentHour.requestCount + - name: RequestsInLast24h + type: integer + description: Number of requests in the last 24h. + jsonPath: .status.requestCount + "schema": + "openAPIV3Schema": + description: "APIRequestCount tracks requests made to an API. The instance name must be of the form `resource.version.group`, matching the resource. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec defines the characteristics of the resource. + type: object + properties: + numberOfUsersToReport: + description: numberOfUsersToReport is the number of users to include in the report. If unspecified or zero, the default is ten. This is default is subject to change. + type: integer + format: int64 + default: 10 + maximum: 100 + minimum: 0 + status: + description: status contains the observed state of the resource. + type: object + properties: + conditions: + description: conditions contains details of the current status of this API Resource. + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + currentHour: + description: currentHour contains request history for the current hour. This is porcelain to make the API easier to read by humans seeing if they addressed a problem. This field is reset on the hour. + type: object + properties: + byNode: + description: byNode contains logs of requests per node. + type: array + maxItems: 512 + items: + description: PerNodeAPIRequestLog contains logs of requests to a certain node. + type: object + properties: + byUser: + description: byUser contains request details by top .spec.numberOfUsersToReport users. Note that because in the case of an apiserver, restart the list of top users is determined on a best-effort basis, the list might be imprecise. In addition, some system users may be explicitly included in the list. + type: array + maxItems: 500 + items: + description: PerUserAPIRequestCount contains logs of a user's requests. + type: object + properties: + byVerb: + description: byVerb details by verb. + type: array + maxItems: 10 + items: + description: PerVerbAPIRequestCount requestCounts requests by API request verb. + type: object + properties: + requestCount: + description: requestCount of requests for verb. + type: integer + format: int64 + minimum: 0 + verb: + description: verb of API request (get, list, create, etc...) + type: string + maxLength: 20 + requestCount: + description: requestCount of requests by the user across all verbs. + type: integer + format: int64 + minimum: 0 + userAgent: + description: userAgent that made the request. The same user often has multiple binaries which connect (pods with many containers). The different binaries will have different userAgents, but the same user. In addition, we have userAgents with version information embedded and the userName isn't likely to change. + type: string + maxLength: 1024 + username: + description: userName that made the request. + type: string + maxLength: 512 + nodeName: + description: nodeName where the request are being handled. + type: string + maxLength: 512 + minLength: 1 + requestCount: + description: requestCount is a sum of all requestCounts across all users, even those outside of the top 10 users. + type: integer + format: int64 + minimum: 0 + requestCount: + description: requestCount is a sum of all requestCounts across nodes. + type: integer + format: int64 + minimum: 0 + last24h: + description: last24h contains request history for the last 24 hours, indexed by the hour, so 12:00AM-12:59 is in index 0, 6am-6:59am is index 6, etc. The index of the current hour is updated live and then duplicated into the requestsLastHour field. + type: array + maxItems: 24 + items: + description: PerResourceAPIRequestLog logs request for various nodes. + type: object + properties: + byNode: + description: byNode contains logs of requests per node. + type: array + maxItems: 512 + items: + description: PerNodeAPIRequestLog contains logs of requests to a certain node. + type: object + properties: + byUser: + description: byUser contains request details by top .spec.numberOfUsersToReport users. Note that because in the case of an apiserver, restart the list of top users is determined on a best-effort basis, the list might be imprecise. In addition, some system users may be explicitly included in the list. 
+ type: array + maxItems: 500 + items: + description: PerUserAPIRequestCount contains logs of a user's requests. + type: object + properties: + byVerb: + description: byVerb details by verb. + type: array + maxItems: 10 + items: + description: PerVerbAPIRequestCount requestCounts requests by API request verb. + type: object + properties: + requestCount: + description: requestCount of requests for verb. + type: integer + format: int64 + minimum: 0 + verb: + description: verb of API request (get, list, create, etc...) + type: string + maxLength: 20 + requestCount: + description: requestCount of requests by the user across all verbs. + type: integer + format: int64 + minimum: 0 + userAgent: + description: userAgent that made the request. The same user often has multiple binaries which connect (pods with many containers). The different binaries will have different userAgents, but the same user. In addition, we have userAgents with version information embedded and the userName isn't likely to change. + type: string + maxLength: 1024 + username: + description: userName that made the request. + type: string + maxLength: 512 + nodeName: + description: nodeName where the request are being handled. + type: string + maxLength: 512 + minLength: 1 + requestCount: + description: requestCount is a sum of all requestCounts across all users, even those outside of the top 10 users. + type: integer + format: int64 + minimum: 0 + requestCount: + description: requestCount is a sum of all requestCounts across nodes. + type: integer + format: int64 + minimum: 0 + removedInRelease: + description: removedInRelease is when the API will be removed. + type: string + maxLength: 64 + minLength: 0 + pattern: ^[0-9][0-9]*\.[0-9][0-9]*$ + requestCount: + description: requestCount is a sum of all requestCounts across all current hours, nodes, and users. + type: integer + format: int64 + minimum: 0 diff --git a/vendor/github.com/openshift/api/apiserver/v1/doc.go b/vendor/github.com/openshift/api/apiserver/v1/doc.go new file mode 100644 index 000000000..cc6a8aa61 --- /dev/null +++ b/vendor/github.com/openshift/api/apiserver/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +kubebuilder:validation:Optional +// +groupName=apiserver.openshift.io +// Package v1 is the v1 version of the API. 
+package v1 diff --git a/vendor/github.com/openshift/api/apiserver/v1/register.go b/vendor/github.com/openshift/api/apiserver/v1/register.go new file mode 100644 index 000000000..9d6e126e4 --- /dev/null +++ b/vendor/github.com/openshift/api/apiserver/v1/register.go @@ -0,0 +1,38 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "apiserver.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &APIRequestCount{}, + &APIRequestCountList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/apiserver/v1/stable.apirequestcount.testsuite.yaml b/vendor/github.com/openshift/api/apiserver/v1/stable.apirequestcount.testsuite.yaml new file mode 100644 index 000000000..f1e61eaff --- /dev/null +++ b/vendor/github.com/openshift/api/apiserver/v1/stable.apirequestcount.testsuite.yaml @@ -0,0 +1,15 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] API Server" +crd: apiserver.openshift.io_apirequestcount.yaml +tests: + onCreate: + - name: Should be able to create a minimal RoleBindingRestriction + initial: | + apiVersion: apiserver.openshift.io/v1 + kind: APIRequestCount + spec: {} # No spec is required for a APIRequestCount + expected: | + apiVersion: apiserver.openshift.io/v1 + kind: APIRequestCount + spec: + numberOfUsersToReport: 10 diff --git a/vendor/github.com/openshift/api/apiserver/v1/types_apirequestcount.go b/vendor/github.com/openshift/api/apiserver/v1/types_apirequestcount.go new file mode 100644 index 000000000..492c48b88 --- /dev/null +++ b/vendor/github.com/openshift/api/apiserver/v1/types_apirequestcount.go @@ -0,0 +1,171 @@ +// Package v1 is an api version in the apiserver.openshift.io group +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +const ( + // RemovedInReleaseLabel is a label which can be used to select APIRequestCounts based on the release + // in which they are removed. The value is equivalent to .status.removedInRelease. + RemovedInReleaseLabel = "apirequestcounts.apiserver.openshift.io/removedInRelease" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:scope="Cluster" +// +kubebuilder:subresource:status +// +genclient:nonNamespaced +// +openshift:compatibility-gen:level=1 + +// APIRequestCount tracks requests made to an API. The instance name must +// be of the form `resource.version.group`, matching the resource. 
+// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +type APIRequestCount struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec defines the characteristics of the resource. + // +kubebuilder:validation:Required + // +required + Spec APIRequestCountSpec `json:"spec"` + + // status contains the observed state of the resource. + Status APIRequestCountStatus `json:"status,omitempty"` +} + +type APIRequestCountSpec struct { + + // numberOfUsersToReport is the number of users to include in the report. + // If unspecified or zero, the default is ten. This is default is subject to change. + // +kubebuilder:default:=10 + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=100 + // +optional + NumberOfUsersToReport int64 `json:"numberOfUsersToReport"` +} + +// +k8s:deepcopy-gen=true +type APIRequestCountStatus struct { + + // conditions contains details of the current status of this API Resource. + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []metav1.Condition `json:"conditions" patchStrategy:"merge" patchMergeKey:"type"` + + // removedInRelease is when the API will be removed. + // +kubebuilder:validation:MinLength=0 + // +kubebuilder:validation:Pattern=^[0-9][0-9]*\.[0-9][0-9]*$ + // +kubebuilder:validation:MaxLength=64 + // +optional + RemovedInRelease string `json:"removedInRelease,omitempty"` + + // requestCount is a sum of all requestCounts across all current hours, nodes, and users. + // +kubebuilder:validation:Minimum=0 + // +required + RequestCount int64 `json:"requestCount"` + + // currentHour contains request history for the current hour. This is porcelain to make the API + // easier to read by humans seeing if they addressed a problem. This field is reset on the hour. + // +optional + CurrentHour PerResourceAPIRequestLog `json:"currentHour"` + + // last24h contains request history for the last 24 hours, indexed by the hour, so + // 12:00AM-12:59 is in index 0, 6am-6:59am is index 6, etc. The index of the current hour + // is updated live and then duplicated into the requestsLastHour field. + // +kubebuilder:validation:MaxItems=24 + // +optional + Last24h []PerResourceAPIRequestLog `json:"last24h"` +} + +// PerResourceAPIRequestLog logs request for various nodes. +type PerResourceAPIRequestLog struct { + + // byNode contains logs of requests per node. + // +kubebuilder:validation:MaxItems=512 + // +optional + ByNode []PerNodeAPIRequestLog `json:"byNode"` + + // requestCount is a sum of all requestCounts across nodes. + // +kubebuilder:validation:Minimum=0 + // +required + RequestCount int64 `json:"requestCount"` +} + +// PerNodeAPIRequestLog contains logs of requests to a certain node. +type PerNodeAPIRequestLog struct { + + // nodeName where the request are being handled. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=512 + // +required + NodeName string `json:"nodeName"` + + // requestCount is a sum of all requestCounts across all users, even those outside of the top 10 users. + // +kubebuilder:validation:Minimum=0 + // +required + RequestCount int64 `json:"requestCount"` + + // byUser contains request details by top .spec.numberOfUsersToReport users. 
+ // Note that because in the case of an apiserver, restart the list of top users is determined on a best-effort basis, + // the list might be imprecise. + // In addition, some system users may be explicitly included in the list. + // +kubebuilder:validation:MaxItems=500 + ByUser []PerUserAPIRequestCount `json:"byUser"` +} + +// PerUserAPIRequestCount contains logs of a user's requests. +type PerUserAPIRequestCount struct { + + // userName that made the request. + // +kubebuilder:validation:MaxLength=512 + UserName string `json:"username"` + + // userAgent that made the request. + // The same user often has multiple binaries which connect (pods with many containers). The different binaries + // will have different userAgents, but the same user. In addition, we have userAgents with version information + // embedded and the userName isn't likely to change. + // +kubebuilder:validation:MaxLength=1024 + UserAgent string `json:"userAgent"` + + // requestCount of requests by the user across all verbs. + // +kubebuilder:validation:Minimum=0 + // +required + RequestCount int64 `json:"requestCount"` + + // byVerb details by verb. + // +kubebuilder:validation:MaxItems=10 + ByVerb []PerVerbAPIRequestCount `json:"byVerb"` +} + +// PerVerbAPIRequestCount requestCounts requests by API request verb. +type PerVerbAPIRequestCount struct { + + // verb of API request (get, list, create, etc...) + // +kubebuilder:validation:MaxLength=20 + // +required + Verb string `json:"verb"` + + // requestCount of requests for verb. + // +kubebuilder:validation:Minimum=0 + // +required + RequestCount int64 `json:"requestCount"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +openshift:compatibility-gen:level=1 + +// APIRequestCountList is a list of APIRequestCount resources. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +type APIRequestCountList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []APIRequestCount `json:"items"` +} diff --git a/vendor/github.com/openshift/api/apiserver/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/apiserver/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..79be37153 --- /dev/null +++ b/vendor/github.com/openshift/api/apiserver/v1/zz_generated.deepcopy.go @@ -0,0 +1,202 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIRequestCount) DeepCopyInto(out *APIRequestCount) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIRequestCount. +func (in *APIRequestCount) DeepCopy() *APIRequestCount { + if in == nil { + return nil + } + out := new(APIRequestCount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *APIRequestCount) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIRequestCountList) DeepCopyInto(out *APIRequestCountList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]APIRequestCount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIRequestCountList. +func (in *APIRequestCountList) DeepCopy() *APIRequestCountList { + if in == nil { + return nil + } + out := new(APIRequestCountList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIRequestCountList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIRequestCountSpec) DeepCopyInto(out *APIRequestCountSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIRequestCountSpec. +func (in *APIRequestCountSpec) DeepCopy() *APIRequestCountSpec { + if in == nil { + return nil + } + out := new(APIRequestCountSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIRequestCountStatus) DeepCopyInto(out *APIRequestCountStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.CurrentHour.DeepCopyInto(&out.CurrentHour) + if in.Last24h != nil { + in, out := &in.Last24h, &out.Last24h + *out = make([]PerResourceAPIRequestLog, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIRequestCountStatus. +func (in *APIRequestCountStatus) DeepCopy() *APIRequestCountStatus { + if in == nil { + return nil + } + out := new(APIRequestCountStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PerNodeAPIRequestLog) DeepCopyInto(out *PerNodeAPIRequestLog) { + *out = *in + if in.ByUser != nil { + in, out := &in.ByUser, &out.ByUser + *out = make([]PerUserAPIRequestCount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerNodeAPIRequestLog. +func (in *PerNodeAPIRequestLog) DeepCopy() *PerNodeAPIRequestLog { + if in == nil { + return nil + } + out := new(PerNodeAPIRequestLog) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PerResourceAPIRequestLog) DeepCopyInto(out *PerResourceAPIRequestLog) { + *out = *in + if in.ByNode != nil { + in, out := &in.ByNode, &out.ByNode + *out = make([]PerNodeAPIRequestLog, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerResourceAPIRequestLog. +func (in *PerResourceAPIRequestLog) DeepCopy() *PerResourceAPIRequestLog { + if in == nil { + return nil + } + out := new(PerResourceAPIRequestLog) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PerUserAPIRequestCount) DeepCopyInto(out *PerUserAPIRequestCount) { + *out = *in + if in.ByVerb != nil { + in, out := &in.ByVerb, &out.ByVerb + *out = make([]PerVerbAPIRequestCount, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerUserAPIRequestCount. +func (in *PerUserAPIRequestCount) DeepCopy() *PerUserAPIRequestCount { + if in == nil { + return nil + } + out := new(PerUserAPIRequestCount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PerVerbAPIRequestCount) DeepCopyInto(out *PerVerbAPIRequestCount) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerVerbAPIRequestCount. +func (in *PerVerbAPIRequestCount) DeepCopy() *PerVerbAPIRequestCount { + if in == nil { + return nil + } + out := new(PerVerbAPIRequestCount) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/apiserver/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/apiserver/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..27d74b6c1 --- /dev/null +++ b/vendor/github.com/openshift/api/apiserver/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,97 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_APIRequestCount = map[string]string{ + "": "APIRequestCount tracks requests made to an API. The instance name must be of the form `resource.version.group`, matching the resource.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec defines the characteristics of the resource.", + "status": "status contains the observed state of the resource.", +} + +func (APIRequestCount) SwaggerDoc() map[string]string { + return map_APIRequestCount +} + +var map_APIRequestCountList = map[string]string{ + "": "APIRequestCountList is a list of APIRequestCount resources.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (APIRequestCountList) SwaggerDoc() map[string]string { + return map_APIRequestCountList +} + +var map_APIRequestCountSpec = map[string]string{ + "numberOfUsersToReport": "numberOfUsersToReport is the number of users to include in the report. If unspecified or zero, the default is ten. This is default is subject to change.", +} + +func (APIRequestCountSpec) SwaggerDoc() map[string]string { + return map_APIRequestCountSpec +} + +var map_APIRequestCountStatus = map[string]string{ + "conditions": "conditions contains details of the current status of this API Resource.", + "removedInRelease": "removedInRelease is when the API will be removed.", + "requestCount": "requestCount is a sum of all requestCounts across all current hours, nodes, and users.", + "currentHour": "currentHour contains request history for the current hour. This is porcelain to make the API easier to read by humans seeing if they addressed a problem. This field is reset on the hour.", + "last24h": "last24h contains request history for the last 24 hours, indexed by the hour, so 12:00AM-12:59 is in index 0, 6am-6:59am is index 6, etc. The index of the current hour is updated live and then duplicated into the requestsLastHour field.", +} + +func (APIRequestCountStatus) SwaggerDoc() map[string]string { + return map_APIRequestCountStatus +} + +var map_PerNodeAPIRequestLog = map[string]string{ + "": "PerNodeAPIRequestLog contains logs of requests to a certain node.", + "nodeName": "nodeName where the request are being handled.", + "requestCount": "requestCount is a sum of all requestCounts across all users, even those outside of the top 10 users.", + "byUser": "byUser contains request details by top .spec.numberOfUsersToReport users. Note that because in the case of an apiserver, restart the list of top users is determined on a best-effort basis, the list might be imprecise. In addition, some system users may be explicitly included in the list.", +} + +func (PerNodeAPIRequestLog) SwaggerDoc() map[string]string { + return map_PerNodeAPIRequestLog +} + +var map_PerResourceAPIRequestLog = map[string]string{ + "": "PerResourceAPIRequestLog logs request for various nodes.", + "byNode": "byNode contains logs of requests per node.", + "requestCount": "requestCount is a sum of all requestCounts across nodes.", +} + +func (PerResourceAPIRequestLog) SwaggerDoc() map[string]string { + return map_PerResourceAPIRequestLog +} + +var map_PerUserAPIRequestCount = map[string]string{ + "": "PerUserAPIRequestCount contains logs of a user's requests.", + "username": "userName that made the request.", + "userAgent": "userAgent that made the request. The same user often has multiple binaries which connect (pods with many containers). The different binaries will have different userAgents, but the same user. 
diff --git a/vendor/github.com/openshift/api/apps/OWNERS b/vendor/github.com/openshift/api/apps/OWNERS
new file mode 100644
index 000000000..d8d669b91
--- /dev/null
+++ b/vendor/github.com/openshift/api/apps/OWNERS
@@ -0,0 +1,3 @@
+reviewers:
+  - mfojtik
+  - soltysh
diff --git a/vendor/github.com/openshift/api/apps/install.go b/vendor/github.com/openshift/api/apps/install.go
new file mode 100644
index 000000000..80f7ba2b2
--- /dev/null
+++ b/vendor/github.com/openshift/api/apps/install.go
@@ -0,0 +1,26 @@
+package apps
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	appsv1 "github.com/openshift/api/apps/v1"
+)
+
+const (
+	GroupName = "apps.openshift.io"
+)
+
+var (
+	schemeBuilder = runtime.NewSchemeBuilder(appsv1.Install)
+	// Install is a function which adds every version of this group to a scheme
+	Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+	return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+	return schema.GroupKind{Group: GroupName, Kind: kind}
+}
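install.go is the conventional group-install shim: Install registers every version of apps.openshift.io into a runtime.Scheme, and the Resource/Kind helpers qualify plain names with the group. A minimal consumer-side sketch using only the identifiers defined above:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	"github.com/openshift/api/apps"
)

func main() {
	scheme := runtime.NewScheme()
	if err := apps.Install(scheme); err != nil { // adds all apps.openshift.io versions
		panic(err)
	}
	fmt.Println(apps.Resource("deploymentconfigs")) // deploymentconfigs.apps.openshift.io
	fmt.Println(apps.Kind("DeploymentConfig"))      // DeploymentConfig.apps.openshift.io
}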
diff --git a/vendor/github.com/openshift/api/apps/v1/consts.go b/vendor/github.com/openshift/api/apps/v1/consts.go
new file mode 100644
index 000000000..212578bcc
--- /dev/null
+++ b/vendor/github.com/openshift/api/apps/v1/consts.go
@@ -0,0 +1,108 @@
+package v1
+
+const (
+	// DeploymentStatusReasonAnnotation represents the reason for a deployment being in a given state.
+	// Used for specifying the reason for cancellation or failure of a deployment.
+	// This is set on the replication controller by the deployer controller.
+	DeploymentStatusReasonAnnotation = "openshift.io/deployment.status-reason"
+
+	// DeploymentPodAnnotation is an annotation on a deployment (a ReplicationController). The
+	// annotation value is the name of the deployer Pod which will act upon the ReplicationController
+	// to implement the deployment behavior.
+	// This is set on replication controller by deployer controller.
+	DeploymentPodAnnotation = "openshift.io/deployer-pod.name"
+
+	// DeploymentConfigAnnotation is an annotation name used to correlate a deployment with the
+	// DeploymentConfig on which the deployment is based.
+	// This is set on replication controller pod template by deployer controller.
+	DeploymentConfigAnnotation = "openshift.io/deployment-config.name"
+
+	// DeploymentCancelledAnnotation indicates that the deployment has been cancelled.
+	// The annotation value does not matter and its mere presence indicates cancellation.
+	// This is set on replication controller by deployment config controller or oc rollout cancel command.
+	DeploymentCancelledAnnotation = "openshift.io/deployment.cancelled"
+
+	// DeploymentEncodedConfigAnnotation is an annotation name used to retrieve specific encoded
+	// DeploymentConfig on which a given deployment is based.
+	// This is set on replication controller by deployer controller.
+	DeploymentEncodedConfigAnnotation = "openshift.io/encoded-deployment-config"
+
+	// DeploymentVersionAnnotation is an annotation on a deployment (a ReplicationController). The
+	// annotation value is the LatestVersion value of the DeploymentConfig which was the basis for
+	// the deployment.
+	// This is set on replication controller pod template by deployment config controller.
+	DeploymentVersionAnnotation = "openshift.io/deployment-config.latest-version"
+
+	// DeployerPodForDeploymentLabel is a label which groups pods related to a
+	// deployment. The value is a deployment name. The deployer pod and hook pods
+	// created by the internal strategies will have this label. Custom
+	// strategies can apply this label to any pods they create, enabling
+	// platform-provided cancellation and garbage collection support.
+	// This is set on deployer pod by deployer controller.
+	DeployerPodForDeploymentLabel = "openshift.io/deployer-pod-for.name"
+
+	// DeploymentStatusAnnotation is an annotation name used to retrieve the DeploymentPhase of
+	// a deployment.
+	// This is set on replication controller by deployer controller.
+	DeploymentStatusAnnotation = "openshift.io/deployment.phase"
+)
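These annotation keys are the contract between the deployer machinery and the ReplicationControllers it manages: the owning config name, the phase, and the cancellation marker are all read back off the RC's annotations. A minimal sketch of that read side (the helper and its sample values are the editor's illustration, not part of this API):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	appsv1 "github.com/openshift/api/apps/v1"
)

// printDeploymentInfo reads the deployer-maintained annotations off a
// replication controller backing a deployment.
func printDeploymentInfo(rc *corev1.ReplicationController) {
	fmt.Println("config:", rc.Annotations[appsv1.DeploymentConfigAnnotation])
	fmt.Println("phase:", rc.Annotations[appsv1.DeploymentStatusAnnotation])
	// Per the comment above, mere presence of the key signals cancellation.
	if _, ok := rc.Annotations[appsv1.DeploymentCancelledAnnotation]; ok {
		fmt.Println("deployment was cancelled")
	}
}

func main() {
	rc := &corev1.ReplicationController{}
	rc.Annotations = map[string]string{
		appsv1.DeploymentConfigAnnotation: "frontend",
		appsv1.DeploymentStatusAnnotation: "Running",
	}
	printDeploymentInfo(rc)
}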
+
+type DeploymentConditionReason string
+
+var (
+	// ReplicationControllerUpdatedReason is added in a deployment config when one of its replication
+	// controllers is updated as part of the rollout process.
+	ReplicationControllerUpdatedReason DeploymentConditionReason = "ReplicationControllerUpdated"
+
+	// ReplicationControllerCreateErrorReason is added in a deployment config when it cannot create a new replication
+	// controller.
+	ReplicationControllerCreateErrorReason DeploymentConditionReason = "ReplicationControllerCreateError"
+
+	// NewReplicationControllerCreatedReason is added in a deployment config when it creates a new replication
+	// controller.
+	NewReplicationControllerCreatedReason DeploymentConditionReason = "NewReplicationControllerCreated"
+
+	// NewReplicationControllerAvailableReason is added in a deployment config when its newest replication controller is made
+	// available, i.e. the number of new pods that have passed readiness checks and run for at least
+	// minReadySeconds is at least the minimum available pods that need to run for the deployment config.
+	NewReplicationControllerAvailableReason DeploymentConditionReason = "NewReplicationControllerAvailable"
+
+	// ProgressDeadlineExceededReason is added in a deployment config when its newest replication controller fails to show
+	// any progress within the given deadline (progressDeadlineSeconds).
+	ProgressDeadlineExceededReason DeploymentConditionReason = "ProgressDeadlineExceeded"
+
+	// DeploymentConfigPausedReason is added in a deployment config when it is paused. Lack of progress shouldn't be
+	// estimated once a deployment config is paused.
+	DeploymentConfigPausedReason DeploymentConditionReason = "DeploymentConfigPaused"
+
+	// DeploymentConfigResumedReason is added in a deployment config when it is resumed. Useful for avoiding accidental
+	// failures of deployment configs that were paused amidst a rollout.
+	DeploymentConfigResumedReason DeploymentConditionReason = "DeploymentConfigResumed"
+
+	// RolloutCancelledReason is added in a deployment config when its newest rollout was
+	// interrupted by cancellation.
+	RolloutCancelledReason DeploymentConditionReason = "RolloutCancelled"
+)
+
+// DeploymentStatus describes the possible states a deployment can be in.
+type DeploymentStatus string
+
+var (
+
+	// DeploymentStatusNew means the deployment has been accepted but not yet acted upon.
+	DeploymentStatusNew DeploymentStatus = "New"
+
+	// DeploymentStatusPending means the deployment has been handed over to a deployment strategy,
+	// but the strategy has not yet declared the deployment to be running.
+	DeploymentStatusPending DeploymentStatus = "Pending"
+
+	// DeploymentStatusRunning means the deployment strategy has reported the deployment as
+	// being in-progress.
+	DeploymentStatusRunning DeploymentStatus = "Running"
+
+	// DeploymentStatusComplete means the deployment finished without an error.
+	DeploymentStatusComplete DeploymentStatus = "Complete"
+
+	// DeploymentStatusFailed means the deployment finished with an error.
+	DeploymentStatusFailed DeploymentStatus = "Failed"
+)
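DeploymentStatus enumerates the phases the deployer records (the value carried by DeploymentStatusAnnotation above). The comments imply that Complete and Failed are terminal while New, Pending, and Running are still in flight; a minimal sketch of that reading (the helper name is the editor's, not part of the API):

package main

import (
	"fmt"

	appsv1 "github.com/openshift/api/apps/v1"
)

// isTerminal reports whether a deployment phase can no longer change,
// following the "finished with/without an error" wording above.
func isTerminal(s appsv1.DeploymentStatus) bool {
	switch s {
	case appsv1.DeploymentStatusComplete, appsv1.DeploymentStatusFailed:
		return true
	default: // New, Pending, Running are still in progress
		return false
	}
}

func main() {
	fmt.Println(isTerminal(appsv1.DeploymentStatusRunning)) // false
	fmt.Println(isTerminal(appsv1.DeploymentStatusFailed))  // true
}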
diff --git a/vendor/github.com/openshift/api/apps/v1/deprecated_consts.go b/vendor/github.com/openshift/api/apps/v1/deprecated_consts.go
new file mode 100644
index 000000000..31969786c
--- /dev/null
+++ b/vendor/github.com/openshift/api/apps/v1/deprecated_consts.go
@@ -0,0 +1,38 @@
+package v1
+
+// This file contains consts that are not shared between components and are set just internally.
+// They will likely be removed in the (near) future.
+
+const (
+	// DeployerPodCreatedAtAnnotation is an annotation on a deployment that
+	// records the time in RFC3339 format of when the deployer pod for this particular
+	// deployment was created.
+	// This is set by deployer controller, but not consumed by any command or internally.
+	// DEPRECATED: will be removed soon
+	DeployerPodCreatedAtAnnotation = "openshift.io/deployer-pod.created-at"
+
+	// DeployerPodStartedAtAnnotation is an annotation on a deployment that
+	// records the time in RFC3339 format of when the deployer pod for this particular
+	// deployment was started.
+	// This is set by deployer controller, but not consumed by any command or internally.
+	// DEPRECATED: will be removed soon
+	DeployerPodStartedAtAnnotation = "openshift.io/deployer-pod.started-at"
+
+	// DeployerPodCompletedAtAnnotation is an annotation on a deployment that records
+	// the time in RFC3339 format of when the deployer pod finished.
+	// This is set by deployer controller, but not consumed by any command or internally.
+	// DEPRECATED: will be removed soon
+	DeployerPodCompletedAtAnnotation = "openshift.io/deployer-pod.completed-at"
+
+	// DesiredReplicasAnnotation represents the desired number of replicas for a
+	// new deployment.
+	// This is set by deployer controller, but not consumed by any command or internally.
+	// DEPRECATED: will be removed soon
+	DesiredReplicasAnnotation = "kubectl.kubernetes.io/desired-replicas"
+
+	// DeploymentAnnotation is an annotation on a deployer Pod. The annotation value is the name
+	// of the deployment (a ReplicationController) on which the deployer Pod acts.
+	// This is set by deployer controller and consumed internally and in oc adm top command.
+	// DEPRECATED: will be removed soon
+	DeploymentAnnotation = "openshift.io/deployment.name"
+)
diff --git a/vendor/github.com/openshift/api/apps/v1/doc.go b/vendor/github.com/openshift/api/apps/v1/doc.go
new file mode 100644
index 000000000..f0fb3f59a
--- /dev/null
+++ b/vendor/github.com/openshift/api/apps/v1/doc.go
@@ -0,0 +1,9 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=github.com/openshift/origin/pkg/apps/apis/apps
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+// +k8s:prerelease-lifecycle-gen=true
+
+// +groupName=apps.openshift.io
+// Package v1 is the v1 version of the API.
+package v1
diff --git a/vendor/github.com/openshift/api/apps/v1/generated.pb.go b/vendor/github.com/openshift/api/apps/v1/generated.pb.go
new file mode 100644
index 000000000..18ed8b931
--- /dev/null
+++ b/vendor/github.com/openshift/api/apps/v1/generated.pb.go
@@ -0,0 +1,7461 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/openshift/api/apps/v1/generated.proto
+
+package v1
+
+import (
+	fmt "fmt"
+
+	io "io"
+
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	k8s_io_api_core_v1 "k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
+	v11 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	math "math"
+	math_bits "math/bits"
+	reflect "reflect"
+	strings "strings"
+
+	intstr "k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *CustomDeploymentStrategyParams) Reset() { *m = CustomDeploymentStrategyParams{} } +func (*CustomDeploymentStrategyParams) ProtoMessage() {} +func (*CustomDeploymentStrategyParams) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{0} +} +func (m *CustomDeploymentStrategyParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomDeploymentStrategyParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomDeploymentStrategyParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomDeploymentStrategyParams.Merge(m, src) +} +func (m *CustomDeploymentStrategyParams) XXX_Size() int { + return m.Size() +} +func (m *CustomDeploymentStrategyParams) XXX_DiscardUnknown() { + xxx_messageInfo_CustomDeploymentStrategyParams.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomDeploymentStrategyParams proto.InternalMessageInfo + +func (m *DeploymentCause) Reset() { *m = DeploymentCause{} } +func (*DeploymentCause) ProtoMessage() {} +func (*DeploymentCause) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{1} +} +func (m *DeploymentCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentCause.Merge(m, src) +} +func (m *DeploymentCause) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCause) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCause.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentCause proto.InternalMessageInfo + +func (m *DeploymentCauseImageTrigger) Reset() { *m = DeploymentCauseImageTrigger{} } +func (*DeploymentCauseImageTrigger) ProtoMessage() {} +func (*DeploymentCauseImageTrigger) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{2} +} +func (m *DeploymentCauseImageTrigger) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCauseImageTrigger) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCauseImageTrigger) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentCauseImageTrigger.Merge(m, src) +} +func (m *DeploymentCauseImageTrigger) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCauseImageTrigger) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCauseImageTrigger.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentCauseImageTrigger proto.InternalMessageInfo + +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{3} +} +func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCondition) XXX_Merge(src proto.Message) { 
+ xxx_messageInfo_DeploymentCondition.Merge(m, src) +} +func (m *DeploymentCondition) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo + +func (m *DeploymentConfig) Reset() { *m = DeploymentConfig{} } +func (*DeploymentConfig) ProtoMessage() {} +func (*DeploymentConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{4} +} +func (m *DeploymentConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentConfig.Merge(m, src) +} +func (m *DeploymentConfig) XXX_Size() int { + return m.Size() +} +func (m *DeploymentConfig) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentConfig proto.InternalMessageInfo + +func (m *DeploymentConfigList) Reset() { *m = DeploymentConfigList{} } +func (*DeploymentConfigList) ProtoMessage() {} +func (*DeploymentConfigList) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{5} +} +func (m *DeploymentConfigList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentConfigList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentConfigList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentConfigList.Merge(m, src) +} +func (m *DeploymentConfigList) XXX_Size() int { + return m.Size() +} +func (m *DeploymentConfigList) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentConfigList.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentConfigList proto.InternalMessageInfo + +func (m *DeploymentConfigRollback) Reset() { *m = DeploymentConfigRollback{} } +func (*DeploymentConfigRollback) ProtoMessage() {} +func (*DeploymentConfigRollback) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{6} +} +func (m *DeploymentConfigRollback) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentConfigRollback) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentConfigRollback) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentConfigRollback.Merge(m, src) +} +func (m *DeploymentConfigRollback) XXX_Size() int { + return m.Size() +} +func (m *DeploymentConfigRollback) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentConfigRollback.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentConfigRollback proto.InternalMessageInfo + +func (m *DeploymentConfigRollbackSpec) Reset() { *m = DeploymentConfigRollbackSpec{} } +func (*DeploymentConfigRollbackSpec) ProtoMessage() {} +func (*DeploymentConfigRollbackSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{7} +} +func (m *DeploymentConfigRollbackSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentConfigRollbackSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentConfigRollbackSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentConfigRollbackSpec.Merge(m, src) +} +func (m *DeploymentConfigRollbackSpec) XXX_Size() int { + return m.Size() +} +func (m *DeploymentConfigRollbackSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentConfigRollbackSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentConfigRollbackSpec proto.InternalMessageInfo + +func (m *DeploymentConfigSpec) Reset() { *m = DeploymentConfigSpec{} } +func (*DeploymentConfigSpec) ProtoMessage() {} +func (*DeploymentConfigSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{8} +} +func (m *DeploymentConfigSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentConfigSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentConfigSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentConfigSpec.Merge(m, src) +} +func (m *DeploymentConfigSpec) XXX_Size() int { + return m.Size() +} +func (m *DeploymentConfigSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentConfigSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentConfigSpec proto.InternalMessageInfo + +func (m *DeploymentConfigStatus) Reset() { *m = DeploymentConfigStatus{} } +func (*DeploymentConfigStatus) ProtoMessage() {} +func (*DeploymentConfigStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{9} +} +func (m *DeploymentConfigStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentConfigStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentConfigStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentConfigStatus.Merge(m, src) +} +func (m *DeploymentConfigStatus) XXX_Size() int { + return m.Size() +} +func (m *DeploymentConfigStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentConfigStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentConfigStatus proto.InternalMessageInfo + +func (m *DeploymentDetails) Reset() { *m = DeploymentDetails{} } +func (*DeploymentDetails) ProtoMessage() {} +func (*DeploymentDetails) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{10} +} +func (m *DeploymentDetails) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentDetails) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentDetails.Merge(m, src) +} +func (m *DeploymentDetails) XXX_Size() int { + return m.Size() +} +func (m *DeploymentDetails) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentDetails.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentDetails proto.InternalMessageInfo + +func (m *DeploymentLog) Reset() { *m = DeploymentLog{} } +func (*DeploymentLog) ProtoMessage() {} +func (*DeploymentLog) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{11} +} +func (m *DeploymentLog) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentLog) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentLog) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentLog.Merge(m, src) +} +func (m *DeploymentLog) XXX_Size() int { + return m.Size() +} +func (m *DeploymentLog) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentLog.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentLog proto.InternalMessageInfo + +func (m *DeploymentLogOptions) Reset() { *m = DeploymentLogOptions{} } +func (*DeploymentLogOptions) ProtoMessage() {} +func (*DeploymentLogOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{12} +} +func (m *DeploymentLogOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentLogOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentLogOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentLogOptions.Merge(m, src) +} +func (m *DeploymentLogOptions) XXX_Size() int { + return m.Size() +} +func (m *DeploymentLogOptions) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentLogOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentLogOptions proto.InternalMessageInfo + +func (m *DeploymentRequest) Reset() { *m = DeploymentRequest{} } +func (*DeploymentRequest) ProtoMessage() {} +func (*DeploymentRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{13} +} +func (m *DeploymentRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentRequest.Merge(m, src) +} +func (m *DeploymentRequest) XXX_Size() int { + return m.Size() +} +func (m *DeploymentRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentRequest proto.InternalMessageInfo + +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{14} +} +func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStrategy.Merge(m, src) +} +func (m *DeploymentStrategy) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo + +func (m *DeploymentTriggerImageChangeParams) Reset() { *m = DeploymentTriggerImageChangeParams{} } +func (*DeploymentTriggerImageChangeParams) ProtoMessage() {} +func (*DeploymentTriggerImageChangeParams) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{15} +} +func (m *DeploymentTriggerImageChangeParams) XXX_Unmarshal(b []byte) 
error { + return m.Unmarshal(b) +} +func (m *DeploymentTriggerImageChangeParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentTriggerImageChangeParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentTriggerImageChangeParams.Merge(m, src) +} +func (m *DeploymentTriggerImageChangeParams) XXX_Size() int { + return m.Size() +} +func (m *DeploymentTriggerImageChangeParams) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentTriggerImageChangeParams.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentTriggerImageChangeParams proto.InternalMessageInfo + +func (m *DeploymentTriggerPolicies) Reset() { *m = DeploymentTriggerPolicies{} } +func (*DeploymentTriggerPolicies) ProtoMessage() {} +func (*DeploymentTriggerPolicies) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{16} +} +func (m *DeploymentTriggerPolicies) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentTriggerPolicies) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentTriggerPolicies) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentTriggerPolicies.Merge(m, src) +} +func (m *DeploymentTriggerPolicies) XXX_Size() int { + return m.Size() +} +func (m *DeploymentTriggerPolicies) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentTriggerPolicies.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentTriggerPolicies proto.InternalMessageInfo + +func (m *DeploymentTriggerPolicy) Reset() { *m = DeploymentTriggerPolicy{} } +func (*DeploymentTriggerPolicy) ProtoMessage() {} +func (*DeploymentTriggerPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{17} +} +func (m *DeploymentTriggerPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentTriggerPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentTriggerPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentTriggerPolicy.Merge(m, src) +} +func (m *DeploymentTriggerPolicy) XXX_Size() int { + return m.Size() +} +func (m *DeploymentTriggerPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentTriggerPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentTriggerPolicy proto.InternalMessageInfo + +func (m *ExecNewPodHook) Reset() { *m = ExecNewPodHook{} } +func (*ExecNewPodHook) ProtoMessage() {} +func (*ExecNewPodHook) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{18} +} +func (m *ExecNewPodHook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExecNewPodHook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExecNewPodHook) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecNewPodHook.Merge(m, src) +} +func (m *ExecNewPodHook) XXX_Size() int { + return m.Size() +} +func (m *ExecNewPodHook) XXX_DiscardUnknown() { + xxx_messageInfo_ExecNewPodHook.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecNewPodHook proto.InternalMessageInfo + +func (m *LifecycleHook) Reset() { *m = LifecycleHook{} } +func 
(*LifecycleHook) ProtoMessage() {} +func (*LifecycleHook) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{19} +} +func (m *LifecycleHook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LifecycleHook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LifecycleHook) XXX_Merge(src proto.Message) { + xxx_messageInfo_LifecycleHook.Merge(m, src) +} +func (m *LifecycleHook) XXX_Size() int { + return m.Size() +} +func (m *LifecycleHook) XXX_DiscardUnknown() { + xxx_messageInfo_LifecycleHook.DiscardUnknown(m) +} + +var xxx_messageInfo_LifecycleHook proto.InternalMessageInfo + +func (m *RecreateDeploymentStrategyParams) Reset() { *m = RecreateDeploymentStrategyParams{} } +func (*RecreateDeploymentStrategyParams) ProtoMessage() {} +func (*RecreateDeploymentStrategyParams) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{20} +} +func (m *RecreateDeploymentStrategyParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RecreateDeploymentStrategyParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RecreateDeploymentStrategyParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_RecreateDeploymentStrategyParams.Merge(m, src) +} +func (m *RecreateDeploymentStrategyParams) XXX_Size() int { + return m.Size() +} +func (m *RecreateDeploymentStrategyParams) XXX_DiscardUnknown() { + xxx_messageInfo_RecreateDeploymentStrategyParams.DiscardUnknown(m) +} + +var xxx_messageInfo_RecreateDeploymentStrategyParams proto.InternalMessageInfo + +func (m *RollingDeploymentStrategyParams) Reset() { *m = RollingDeploymentStrategyParams{} } +func (*RollingDeploymentStrategyParams) ProtoMessage() {} +func (*RollingDeploymentStrategyParams) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{21} +} +func (m *RollingDeploymentStrategyParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingDeploymentStrategyParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingDeploymentStrategyParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingDeploymentStrategyParams.Merge(m, src) +} +func (m *RollingDeploymentStrategyParams) XXX_Size() int { + return m.Size() +} +func (m *RollingDeploymentStrategyParams) XXX_DiscardUnknown() { + xxx_messageInfo_RollingDeploymentStrategyParams.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingDeploymentStrategyParams proto.InternalMessageInfo + +func (m *TagImageHook) Reset() { *m = TagImageHook{} } +func (*TagImageHook) ProtoMessage() {} +func (*TagImageHook) Descriptor() ([]byte, []int) { + return fileDescriptor_8f1b1bee37da74c1, []int{22} +} +func (m *TagImageHook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TagImageHook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TagImageHook) XXX_Merge(src proto.Message) { + xxx_messageInfo_TagImageHook.Merge(m, src) +} +func (m *TagImageHook) XXX_Size() int { + return m.Size() +} +func (m *TagImageHook) 
XXX_DiscardUnknown() { + xxx_messageInfo_TagImageHook.DiscardUnknown(m) +} + +var xxx_messageInfo_TagImageHook proto.InternalMessageInfo + +func init() { + proto.RegisterType((*CustomDeploymentStrategyParams)(nil), "github.com.openshift.api.apps.v1.CustomDeploymentStrategyParams") + proto.RegisterType((*DeploymentCause)(nil), "github.com.openshift.api.apps.v1.DeploymentCause") + proto.RegisterType((*DeploymentCauseImageTrigger)(nil), "github.com.openshift.api.apps.v1.DeploymentCauseImageTrigger") + proto.RegisterType((*DeploymentCondition)(nil), "github.com.openshift.api.apps.v1.DeploymentCondition") + proto.RegisterType((*DeploymentConfig)(nil), "github.com.openshift.api.apps.v1.DeploymentConfig") + proto.RegisterType((*DeploymentConfigList)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigList") + proto.RegisterType((*DeploymentConfigRollback)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigRollback") + proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigRollback.UpdatedAnnotationsEntry") + proto.RegisterType((*DeploymentConfigRollbackSpec)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigRollbackSpec") + proto.RegisterType((*DeploymentConfigSpec)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigSpec") + proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigSpec.SelectorEntry") + proto.RegisterType((*DeploymentConfigStatus)(nil), "github.com.openshift.api.apps.v1.DeploymentConfigStatus") + proto.RegisterType((*DeploymentDetails)(nil), "github.com.openshift.api.apps.v1.DeploymentDetails") + proto.RegisterType((*DeploymentLog)(nil), "github.com.openshift.api.apps.v1.DeploymentLog") + proto.RegisterType((*DeploymentLogOptions)(nil), "github.com.openshift.api.apps.v1.DeploymentLogOptions") + proto.RegisterType((*DeploymentRequest)(nil), "github.com.openshift.api.apps.v1.DeploymentRequest") + proto.RegisterType((*DeploymentStrategy)(nil), "github.com.openshift.api.apps.v1.DeploymentStrategy") + proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.apps.v1.DeploymentStrategy.AnnotationsEntry") + proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.apps.v1.DeploymentStrategy.LabelsEntry") + proto.RegisterType((*DeploymentTriggerImageChangeParams)(nil), "github.com.openshift.api.apps.v1.DeploymentTriggerImageChangeParams") + proto.RegisterType((*DeploymentTriggerPolicies)(nil), "github.com.openshift.api.apps.v1.DeploymentTriggerPolicies") + proto.RegisterType((*DeploymentTriggerPolicy)(nil), "github.com.openshift.api.apps.v1.DeploymentTriggerPolicy") + proto.RegisterType((*ExecNewPodHook)(nil), "github.com.openshift.api.apps.v1.ExecNewPodHook") + proto.RegisterType((*LifecycleHook)(nil), "github.com.openshift.api.apps.v1.LifecycleHook") + proto.RegisterType((*RecreateDeploymentStrategyParams)(nil), "github.com.openshift.api.apps.v1.RecreateDeploymentStrategyParams") + proto.RegisterType((*RollingDeploymentStrategyParams)(nil), "github.com.openshift.api.apps.v1.RollingDeploymentStrategyParams") + proto.RegisterType((*TagImageHook)(nil), "github.com.openshift.api.apps.v1.TagImageHook") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/apps/v1/generated.proto", fileDescriptor_8f1b1bee37da74c1) +} + +var fileDescriptor_8f1b1bee37da74c1 = []byte{ + // 2523 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x5a, 0xcd, 0x6f, 0x1c, 0x49, + 0x15, 0x77, 0x7b, 0x66, 0xec, 
0x99, 0xe7, 0xaf, 0xb8, 0x9c, 0x8f, 0x59, 0x2f, 0xf2, 0x58, 0xb3, + 0xda, 0xc5, 0xc0, 0x32, 0xb3, 0xf1, 0x86, 0xd5, 0x26, 0xd1, 0x2e, 0x78, 0x1c, 0x67, 0xd7, 0xd1, + 0x38, 0x31, 0x65, 0x27, 0x21, 0x11, 0x82, 0x94, 0x7b, 0xca, 0xe3, 0x5a, 0x77, 0x77, 0x0d, 0xdd, + 0x35, 0x93, 0x0c, 0x42, 0x68, 0x2f, 0x20, 0x21, 0xed, 0x81, 0x23, 0x5c, 0x10, 0x07, 0xae, 0x20, + 0x0e, 0xdc, 0x11, 0x07, 0xa4, 0x1c, 0x40, 0x5a, 0x09, 0x09, 0x56, 0x08, 0x59, 0x1b, 0x73, 0xe3, + 0x4f, 0xc8, 0x09, 0xd5, 0x47, 0x7f, 0xcd, 0x47, 0xec, 0x71, 0x72, 0x73, 0xbf, 0x8f, 0xdf, 0x7b, + 0xf5, 0xea, 0xbd, 0x57, 0xaf, 0x6a, 0x0c, 0xef, 0x34, 0x99, 0x38, 0x68, 0xef, 0x55, 0x6c, 0xee, + 0x56, 0x79, 0x8b, 0x7a, 0xc1, 0x01, 0xdb, 0x17, 0x55, 0xd2, 0x62, 0x55, 0xd2, 0x6a, 0x05, 0xd5, + 0xce, 0xe5, 0x6a, 0x93, 0x7a, 0xd4, 0x27, 0x82, 0x36, 0x2a, 0x2d, 0x9f, 0x0b, 0x8e, 0x96, 0x63, + 0x8d, 0x4a, 0xa4, 0x51, 0x21, 0x2d, 0x56, 0x91, 0x1a, 0x95, 0xce, 0xe5, 0xc5, 0x6f, 0x26, 0x30, + 0x9b, 0xbc, 0xc9, 0xab, 0x4a, 0x71, 0xaf, 0xbd, 0xaf, 0xbe, 0xd4, 0x87, 0xfa, 0x4b, 0x03, 0x2e, + 0x96, 0x0f, 0xdf, 0x0f, 0x2a, 0x8c, 0x2b, 0xa3, 0x36, 0xf7, 0xe9, 0x00, 0xa3, 0x8b, 0x57, 0x62, + 0x19, 0x97, 0xd8, 0x07, 0xcc, 0xa3, 0x7e, 0xb7, 0xda, 0x3a, 0x6c, 0x4a, 0x42, 0x50, 0x75, 0xa9, + 0x20, 0x83, 0xb4, 0xde, 0x1b, 0xa6, 0xe5, 0xb7, 0x3d, 0xc1, 0x5c, 0x5a, 0x0d, 0xec, 0x03, 0xea, + 0x92, 0x3e, 0xbd, 0x77, 0x87, 0xe9, 0xb5, 0x05, 0x73, 0xaa, 0xcc, 0x13, 0x81, 0xf0, 0x7b, 0x95, + 0xca, 0x7f, 0xb6, 0x60, 0x69, 0xbd, 0x1d, 0x08, 0xee, 0xde, 0xa0, 0x2d, 0x87, 0x77, 0x5d, 0xea, + 0x89, 0x1d, 0x21, 0x25, 0x9a, 0xdd, 0x6d, 0xe2, 0x13, 0x37, 0x40, 0x6f, 0x40, 0x8e, 0xb9, 0xa4, + 0x49, 0x8b, 0xd6, 0xb2, 0xb5, 0x52, 0xa8, 0xcd, 0x3c, 0x3d, 0x2a, 0x8d, 0x1d, 0x1f, 0x95, 0x72, + 0x9b, 0x92, 0x88, 0x35, 0x0f, 0x7d, 0x17, 0xa6, 0xa8, 0xd7, 0x61, 0x3e, 0xf7, 0x24, 0x42, 0x71, + 0x7c, 0x39, 0xb3, 0x32, 0xb5, 0xba, 0x58, 0xd1, 0x2e, 0xa9, 0x38, 0xcb, 0x20, 0x55, 0x3a, 0x97, + 0x2b, 0x1b, 0x5e, 0xe7, 0x1e, 0xf1, 0x6b, 0x0b, 0x06, 0x66, 0x6a, 0x23, 0x56, 0xc3, 0x49, 0x0c, + 0xf4, 0x26, 0x4c, 0xda, 0xdc, 0x75, 0x89, 0xd7, 0x28, 0x66, 0x96, 0x33, 0x2b, 0x85, 0xda, 0xd4, + 0xf1, 0x51, 0x69, 0x72, 0x5d, 0x93, 0x70, 0xc8, 0x2b, 0xff, 0xc5, 0x82, 0xb9, 0xd8, 0xf7, 0x75, + 0xd2, 0x0e, 0x28, 0xba, 0x0a, 0x59, 0xd1, 0x6d, 0x85, 0x1e, 0xbf, 0x69, 0x4c, 0x65, 0x77, 0xbb, + 0x2d, 0xfa, 0xfc, 0xa8, 0x74, 0x21, 0x16, 0xdf, 0xf5, 0x59, 0xb3, 0x49, 0x7d, 0xc9, 0xc0, 0x4a, + 0x05, 0x05, 0x30, 0xad, 0x56, 0x64, 0x38, 0xc5, 0xf1, 0x65, 0x6b, 0x65, 0x6a, 0xf5, 0x83, 0xca, + 0x49, 0xf9, 0x53, 0xe9, 0xf1, 0x61, 0x33, 0x01, 0x52, 0x3b, 0x77, 0x7c, 0x54, 0x9a, 0x4e, 0x52, + 0x70, 0xca, 0x48, 0xb9, 0x01, 0xaf, 0xbf, 0x40, 0x1d, 0x6d, 0x40, 0x76, 0xdf, 0xe7, 0xae, 0x5a, + 0xce, 0xd4, 0xea, 0x1b, 0x83, 0xa2, 0x7a, 0x67, 0xef, 0x13, 0x6a, 0x0b, 0x4c, 0xf7, 0xa9, 0x4f, + 0x3d, 0x9b, 0xd6, 0xa6, 0xc3, 0x35, 0xdf, 0xf4, 0xb9, 0x8b, 0x95, 0x7a, 0xf9, 0x5f, 0x19, 0x58, + 0x48, 0x98, 0xe1, 0x5e, 0x83, 0x09, 0xc6, 0x3d, 0x74, 0x3d, 0x15, 0xad, 0xaf, 0xf6, 0x44, 0xeb, + 0xd2, 0x00, 0x95, 0x44, 0xbc, 0xea, 0x30, 0x11, 0x08, 0x22, 0xda, 0x81, 0x8a, 0x54, 0xa1, 0x76, + 0xc5, 0xa8, 0x4f, 0xec, 0x28, 0xea, 0xf3, 0xa3, 0xd2, 0x80, 0x4a, 0xa9, 0x44, 0x48, 0x5a, 0x0a, + 0x1b, 0x0c, 0xf4, 0x09, 0xcc, 0x3a, 0x24, 0x10, 0x77, 0x5b, 0x0d, 0x22, 0xe8, 0x2e, 0x73, 0x69, + 0x71, 0x42, 0xad, 0xf9, 0xeb, 0x89, 0x35, 0x47, 0xc9, 0x5d, 0x69, 0x1d, 0x36, 0x25, 0x21, 0xa8, + 0xc8, 0x52, 0x92, 0x51, 0x90, 0x1a, 0xb5, 0x8b, 0xc6, 0x83, 0xd9, 0x7a, 0x0a, 0x09, 0xf7, 0x20, + 0xa3, 0x0e, 0x20, 0x49, 0xd9, 0xf5, 0x89, 0x17, 0xe8, 
0x55, 0x49, 0x7b, 0x99, 0x91, 0xed, 0x2d, + 0x1a, 0x7b, 0xa8, 0xde, 0x87, 0x86, 0x07, 0x58, 0x40, 0x6f, 0xc1, 0x84, 0x4f, 0x49, 0xc0, 0xbd, + 0x62, 0x56, 0x45, 0x6c, 0x36, 0x8c, 0x18, 0x56, 0x54, 0x6c, 0xb8, 0xe8, 0x6b, 0x30, 0xe9, 0xd2, + 0x20, 0x90, 0x95, 0x97, 0x53, 0x82, 0x73, 0x46, 0x70, 0x72, 0x4b, 0x93, 0x71, 0xc8, 0x2f, 0xff, + 0x71, 0x1c, 0xce, 0xa5, 0xb6, 0x69, 0x9f, 0x35, 0xd1, 0x23, 0xc8, 0x4b, 0x3f, 0x1b, 0x44, 0x10, + 0x93, 0x39, 0xef, 0x9c, 0x6e, 0x55, 0x3a, 0x97, 0xb6, 0xa8, 0x20, 0x35, 0x64, 0x4c, 0x42, 0x4c, + 0xc3, 0x11, 0x2a, 0xfa, 0x1e, 0x64, 0x83, 0x16, 0xb5, 0x4d, 0x8d, 0xbc, 0x37, 0x52, 0x8d, 0x28, + 0x1f, 0x77, 0x5a, 0xd4, 0x8e, 0x53, 0x55, 0x7e, 0x61, 0x85, 0x88, 0x1e, 0x45, 0x59, 0xa5, 0xf7, + 0xe3, 0xfd, 0x33, 0x60, 0x2b, 0xfd, 0x38, 0xba, 0xe9, 0x4c, 0x2b, 0xff, 0xdd, 0x82, 0xf3, 0xbd, + 0x2a, 0x75, 0x16, 0x08, 0xf4, 0xfd, 0xbe, 0xb0, 0x55, 0x4e, 0x17, 0x36, 0xa9, 0xad, 0x82, 0x76, + 0xce, 0x98, 0xcc, 0x87, 0x94, 0x44, 0xc8, 0xee, 0x43, 0x8e, 0x09, 0xea, 0x06, 0xa6, 0x43, 0xae, + 0x8e, 0xbe, 0xae, 0x44, 0x03, 0x96, 0x40, 0x58, 0xe3, 0x95, 0x7f, 0x9e, 0x81, 0x62, 0xaf, 0x28, + 0xe6, 0x8e, 0xb3, 0x47, 0xec, 0x43, 0xb4, 0x0c, 0x59, 0x8f, 0xb8, 0x61, 0x85, 0x47, 0x01, 0xbf, + 0x4d, 0x5c, 0x8a, 0x15, 0x07, 0xfd, 0xc6, 0x02, 0xd4, 0x56, 0xb5, 0xd1, 0x58, 0xf3, 0x3c, 0x2e, + 0x88, 0x4c, 0xd7, 0xd0, 0x4b, 0x3c, 0xba, 0x97, 0xa1, 0xe9, 0xca, 0xdd, 0x3e, 0xd0, 0x0d, 0x4f, + 0xf8, 0xdd, 0xb8, 0x6a, 0xfa, 0x05, 0xf0, 0x00, 0x4f, 0xd0, 0x23, 0x93, 0x6b, 0x3a, 0x1f, 0x3e, + 0x3c, 0xbb, 0x47, 0xc3, 0x72, 0x6e, 0x71, 0x03, 0x2e, 0x0d, 0x71, 0x16, 0x9d, 0x83, 0xcc, 0x21, + 0xed, 0xea, 0xf0, 0x61, 0xf9, 0x27, 0x3a, 0x0f, 0xb9, 0x0e, 0x71, 0xda, 0x54, 0x77, 0x3d, 0xac, + 0x3f, 0xae, 0x8d, 0xbf, 0x6f, 0x95, 0xff, 0x94, 0x81, 0xaf, 0xbc, 0xc8, 0xf6, 0x2b, 0xea, 0xe6, + 0xe8, 0x6d, 0xc8, 0xfb, 0xb4, 0xc3, 0x02, 0xc6, 0x3d, 0xe5, 0x44, 0x26, 0xce, 0x3b, 0x6c, 0xe8, + 0x38, 0x92, 0x40, 0x6b, 0x30, 0xc7, 0x3c, 0xdb, 0x69, 0x37, 0xc2, 0x43, 0x45, 0x57, 0x56, 0xbe, + 0x76, 0xc9, 0x28, 0xcd, 0x6d, 0xa6, 0xd9, 0xb8, 0x57, 0x3e, 0x09, 0x41, 0xdd, 0x96, 0x43, 0x04, + 0x55, 0x0d, 0x6c, 0x00, 0x84, 0x61, 0xe3, 0x5e, 0x79, 0x74, 0x0f, 0x2e, 0x1a, 0x12, 0xa6, 0x2d, + 0x87, 0xd9, 0x2a, 0xc6, 0xb2, 0x42, 0x54, 0x87, 0xcb, 0xd7, 0x96, 0x0c, 0xd2, 0xc5, 0xcd, 0x81, + 0x52, 0x78, 0x88, 0x76, 0xc2, 0xb5, 0x70, 0x76, 0x51, 0xe7, 0x46, 0xbf, 0x6b, 0x21, 0x1b, 0xf7, + 0xca, 0x97, 0xff, 0x97, 0xeb, 0xef, 0x07, 0x6a, 0xbb, 0xf6, 0x20, 0x1f, 0x84, 0xa0, 0x7a, 0xcb, + 0xae, 0x8c, 0x92, 0x7c, 0xa1, 0x81, 0x78, 0x77, 0x22, 0x1f, 0x22, 0x5c, 0xe9, 0xbf, 0xcb, 0x3c, + 0x4c, 0x49, 0xa3, 0xbb, 0x43, 0x6d, 0xee, 0x35, 0x82, 0x62, 0x61, 0xd9, 0x5a, 0xc9, 0xc5, 0xfe, + 0x6f, 0xa5, 0xd9, 0xb8, 0x57, 0x1e, 0x51, 0xc8, 0x8b, 0x70, 0x67, 0x75, 0x3f, 0xbe, 0x3e, 0x8a, + 0x9b, 0x66, 0x97, 0xb7, 0xb9, 0xc3, 0x6c, 0x46, 0x83, 0xda, 0xb4, 0xf4, 0x34, 0xca, 0x85, 0x08, + 0x5a, 0x67, 0x9d, 0x0a, 0xbe, 0x4e, 0xa0, 0x5c, 0x32, 0xeb, 0x34, 0x1d, 0x47, 0x12, 0xa8, 0x0e, + 0xe7, 0xc3, 0x0c, 0xfc, 0x98, 0x05, 0x82, 0xfb, 0xdd, 0x3a, 0x73, 0x99, 0x50, 0x79, 0x93, 0xab, + 0x15, 0x8f, 0x8f, 0x4a, 0xe7, 0xf1, 0x00, 0x3e, 0x1e, 0xa8, 0x25, 0xbb, 0x98, 0xa0, 0x81, 0x30, + 0xb9, 0x12, 0xd5, 0xc4, 0x2e, 0x0d, 0x04, 0x56, 0x1c, 0x79, 0xb4, 0xb6, 0xe4, 0xf4, 0xd4, 0x30, + 0xdb, 0x1f, 0x35, 0xff, 0x6d, 0x45, 0xc5, 0x86, 0x8b, 0x7c, 0xc8, 0x07, 0xd4, 0xa1, 0xb6, 0xe0, + 0x7e, 0x71, 0x52, 0xb5, 0xb8, 0x1b, 0x67, 0x3b, 0xbc, 0x2a, 0x3b, 0x06, 0x46, 0x37, 0xb5, 0x78, + 0x8f, 0x0d, 0x19, 0x47, 0x76, 0xd0, 0x16, 0xe4, 0x45, 0x58, 0x37, 0xf9, 0xe1, 
0xa5, 0xbf, 0xcd, + 0x1b, 0x61, 0xb9, 0xe8, 0x4e, 0xa5, 0x36, 0x22, 0xac, 0xa8, 0x08, 0x62, 0xf1, 0x3a, 0xcc, 0xa4, + 0x6c, 0x8f, 0xd4, 0xa3, 0xfe, 0x90, 0x83, 0x8b, 0x83, 0xcf, 0x4b, 0x74, 0x1d, 0x66, 0x24, 0x7e, + 0x20, 0xee, 0x51, 0x5f, 0xf5, 0x16, 0x4b, 0xf5, 0x96, 0x0b, 0x66, 0x65, 0x33, 0xf5, 0x24, 0x13, + 0xa7, 0x65, 0xd1, 0x2d, 0x40, 0x7c, 0x2f, 0xa0, 0x7e, 0x87, 0x36, 0x3e, 0xd2, 0x17, 0x8d, 0xb8, + 0x3b, 0x45, 0x0d, 0xff, 0x4e, 0x9f, 0x04, 0x1e, 0xa0, 0x35, 0x62, 0xa6, 0xad, 0xc1, 0x9c, 0x39, + 0x34, 0x42, 0xa6, 0x49, 0xb2, 0xa8, 0x82, 0xee, 0xa6, 0xd9, 0xb8, 0x57, 0x1e, 0x7d, 0x04, 0xf3, + 0xa4, 0x43, 0x98, 0x43, 0xf6, 0x1c, 0x1a, 0x81, 0xe4, 0x14, 0xc8, 0x6b, 0x06, 0x64, 0x7e, 0xad, + 0x57, 0x00, 0xf7, 0xeb, 0xa0, 0x2d, 0x58, 0x68, 0x7b, 0xfd, 0x50, 0x13, 0x0a, 0xea, 0x75, 0x03, + 0xb5, 0x70, 0xb7, 0x5f, 0x04, 0x0f, 0xd2, 0x43, 0x0f, 0x61, 0xb2, 0x41, 0x05, 0x61, 0x4e, 0x50, + 0x9c, 0x54, 0x79, 0xf3, 0xee, 0x28, 0xb9, 0x7a, 0x43, 0xab, 0xea, 0xcb, 0x93, 0xf9, 0xc0, 0x21, + 0x20, 0x62, 0x00, 0x76, 0x38, 0x8a, 0x07, 0xc5, 0xbc, 0x2a, 0x85, 0x6f, 0x8d, 0x58, 0x0a, 0x5a, + 0x3b, 0x1e, 0x15, 0x23, 0x52, 0x80, 0x13, 0xe0, 0x32, 0xb1, 0x7c, 0xd9, 0xb0, 0xa2, 0x78, 0xe8, + 0x0e, 0x17, 0x25, 0x16, 0x4e, 0x32, 0x71, 0x5a, 0xb6, 0xfc, 0x6b, 0x0b, 0xe6, 0xfb, 0xd6, 0x94, + 0x9c, 0x90, 0xad, 0x17, 0x4f, 0xc8, 0xe8, 0x01, 0x4c, 0xd8, 0xb2, 0xf6, 0xc3, 0x91, 0xe6, 0xf2, + 0xc8, 0x17, 0xba, 0xb8, 0x99, 0xa8, 0xcf, 0x00, 0x1b, 0xc0, 0xf2, 0x1c, 0xcc, 0xc4, 0xa2, 0x75, + 0xde, 0x2c, 0x7f, 0x96, 0x4d, 0x1e, 0x25, 0x75, 0xde, 0xbc, 0xd3, 0xd2, 0x21, 0xa8, 0x42, 0xc1, + 0xe6, 0x9e, 0x20, 0x72, 0x80, 0x34, 0x1e, 0xcf, 0x1b, 0xd0, 0xc2, 0x7a, 0xc8, 0xc0, 0xb1, 0x8c, + 0xec, 0x67, 0xfb, 0xdc, 0x71, 0xf8, 0x63, 0x55, 0x43, 0x89, 0x7e, 0x76, 0x53, 0x51, 0xb1, 0xe1, + 0xca, 0x5a, 0x69, 0xc9, 0x96, 0xc9, 0xdb, 0xe1, 0xb1, 0x1e, 0xd5, 0xca, 0xb6, 0xa1, 0xe3, 0x48, + 0x02, 0x5d, 0x81, 0xe9, 0x80, 0x79, 0x36, 0x0d, 0x8f, 0x9a, 0xac, 0x9e, 0x1e, 0xe4, 0x1d, 0x75, + 0x27, 0x41, 0xc7, 0x29, 0x29, 0x74, 0x1f, 0x0a, 0xea, 0x5b, 0xdd, 0x92, 0x72, 0x23, 0xdf, 0x92, + 0x66, 0xe4, 0x22, 0x77, 0x42, 0x00, 0x1c, 0x63, 0xa1, 0x55, 0x00, 0xc1, 0x5c, 0x1a, 0x08, 0xe2, + 0xb6, 0x02, 0xd3, 0xb8, 0xa3, 0x64, 0xda, 0x8d, 0x38, 0x38, 0x21, 0x85, 0xbe, 0x01, 0x05, 0x99, + 0x02, 0x75, 0xe6, 0x51, 0x5d, 0x15, 0x19, 0x6d, 0x60, 0x37, 0x24, 0xe2, 0x98, 0x8f, 0x2a, 0x00, + 0x8e, 0x3c, 0x40, 0x6a, 0x5d, 0x41, 0x03, 0xd5, 0x7b, 0x33, 0xb5, 0x59, 0x09, 0x5e, 0x8f, 0xa8, + 0x38, 0x21, 0x21, 0xa3, 0xee, 0xf1, 0xc7, 0x84, 0x09, 0x95, 0xa2, 0x89, 0xa8, 0xdf, 0xe6, 0xf7, + 0x09, 0x13, 0xd8, 0x70, 0xd1, 0x9b, 0x30, 0xd9, 0x31, 0x4d, 0x12, 0x14, 0xa8, 0xaa, 0xb1, 0xb0, + 0x35, 0x86, 0xbc, 0xf2, 0xbf, 0x53, 0xb9, 0x8b, 0xe9, 0x8f, 0xda, 0xf2, 0xa8, 0x3a, 0x79, 0x24, + 0x7f, 0x0b, 0x26, 0x74, 0x77, 0xed, 0xdd, 0x7c, 0xdd, 0x82, 0xb1, 0xe1, 0xa2, 0x37, 0x20, 0xb7, + 0xcf, 0x7d, 0x9b, 0x9a, 0x9d, 0x8f, 0xae, 0x07, 0x37, 0x25, 0x11, 0x6b, 0x1e, 0xba, 0x07, 0x73, + 0xf4, 0x49, 0x7a, 0xfe, 0xcb, 0xaa, 0x47, 0x95, 0xb7, 0x65, 0x6f, 0xdc, 0x48, 0xb3, 0x86, 0xbf, + 0x91, 0xf4, 0x82, 0x94, 0xff, 0x31, 0x09, 0xa8, 0x7f, 0xd8, 0x41, 0xd7, 0x52, 0x4f, 0x0a, 0x6f, + 0xf5, 0x3c, 0x29, 0x5c, 0xec, 0xd7, 0x48, 0xbc, 0x28, 0x74, 0x60, 0xda, 0x56, 0x2f, 0x52, 0xfa, + 0xfd, 0xc9, 0x4c, 0x33, 0xdf, 0x39, 0xb9, 0x60, 0x5f, 0xfc, 0x8e, 0xa5, 0x13, 0x7c, 0x3d, 0x81, + 0x8c, 0x53, 0x76, 0xd0, 0x4f, 0x61, 0xd6, 0xa7, 0xb6, 0x4f, 0x89, 0xa0, 0xc6, 0xb2, 0xbe, 0x6b, + 0xd4, 0x4e, 0xb6, 0x8c, 0x8d, 0xde, 0x50, 0xdb, 0xe8, 0xf8, 0xa8, 0x34, 0x8b, 0x53, 0xe8, 0xb8, + 0xc7, 
0x1a, 0xfa, 0x31, 0xcc, 0xf8, 0xdc, 0x71, 0x98, 0xd7, 0x34, 0xe6, 0xb3, 0xca, 0xfc, 0xda, + 0x29, 0xcc, 0x6b, 0xb5, 0xa1, 0xd6, 0xe7, 0x55, 0x7f, 0x4d, 0x62, 0xe3, 0xb4, 0x29, 0xf4, 0x00, + 0x0a, 0x3e, 0x0d, 0x78, 0xdb, 0xb7, 0x69, 0x60, 0x8a, 0x7b, 0x65, 0xd0, 0x74, 0x82, 0x8d, 0x90, + 0xcc, 0x62, 0xe6, 0x53, 0x69, 0x2b, 0x88, 0x7b, 0x58, 0xc8, 0x0d, 0x70, 0x8c, 0x86, 0x0e, 0x64, + 0x1a, 0xef, 0x51, 0x47, 0x96, 0x76, 0xe6, 0x74, 0x1b, 0xd9, 0xbf, 0x90, 0x4a, 0x5d, 0x41, 0xe8, + 0x29, 0x2b, 0x51, 0x08, 0x92, 0x88, 0x0d, 0x3e, 0xfa, 0x09, 0x4c, 0x91, 0xc4, 0xdd, 0x55, 0x0f, + 0x76, 0x1b, 0x67, 0x32, 0xd7, 0x77, 0x5d, 0x8d, 0x9e, 0x2b, 0x93, 0xf7, 0xd4, 0xa4, 0x39, 0x74, + 0x07, 0x2e, 0x10, 0x5b, 0xb0, 0x0e, 0xbd, 0x41, 0x49, 0xc3, 0x61, 0x5e, 0xd4, 0x5e, 0x75, 0xc3, + 0x79, 0xed, 0xf8, 0xa8, 0x74, 0x61, 0x6d, 0x90, 0x00, 0x1e, 0xac, 0xb7, 0x78, 0x15, 0xa6, 0x12, + 0xab, 0x1e, 0x65, 0xbe, 0x5b, 0xfc, 0x10, 0xce, 0xbd, 0xd4, 0x1d, 0xf6, 0x77, 0xe3, 0x50, 0xee, + 0x6b, 0x00, 0xea, 0x49, 0x72, 0xfd, 0x80, 0x78, 0xcd, 0x30, 0x63, 0xab, 0x50, 0x20, 0x6d, 0xc1, + 0x5d, 0x22, 0x98, 0xad, 0x80, 0xf3, 0x71, 0x2e, 0xac, 0x85, 0x0c, 0x1c, 0xcb, 0xa0, 0x6b, 0x30, + 0x1b, 0x1d, 0x6e, 0xb2, 0xd3, 0xe9, 0xd3, 0xb8, 0xa0, 0xcb, 0x63, 0x3d, 0xc5, 0xc1, 0x3d, 0x92, + 0xd1, 0xb5, 0x39, 0xf3, 0x72, 0xd7, 0xe6, 0x5b, 0xe1, 0xab, 0x9f, 0x5a, 0x13, 0x6d, 0xa8, 0x55, + 0x99, 0x97, 0xb8, 0x9e, 0x97, 0xbc, 0xa4, 0x04, 0x1e, 0xa0, 0x55, 0xfe, 0x99, 0x05, 0xaf, 0x0d, + 0xbd, 0x42, 0xa1, 0x1f, 0x84, 0x4f, 0x3d, 0x96, 0x4a, 0xc4, 0xab, 0x67, 0xbd, 0x8e, 0x75, 0x07, + 0xbf, 0xf8, 0x5c, 0xcb, 0xff, 0xea, 0xb7, 0xa5, 0xb1, 0x4f, 0xff, 0xb3, 0x3c, 0x56, 0xfe, 0xd2, + 0x82, 0x4b, 0x43, 0x74, 0x5f, 0xe6, 0x29, 0xfc, 0x17, 0x16, 0xcc, 0xb3, 0xde, 0x4d, 0x37, 0xed, + 0xf8, 0xc6, 0x19, 0x56, 0xd3, 0x97, 0x40, 0xb5, 0x0b, 0x72, 0xa6, 0xee, 0x23, 0xe3, 0x7e, 0xab, + 0xe5, 0x7f, 0x5a, 0x30, 0xbb, 0xf1, 0x84, 0xda, 0xb7, 0xe9, 0xe3, 0x6d, 0xde, 0xf8, 0x98, 0xf3, + 0xc3, 0xe4, 0xef, 0x03, 0xd6, 0xf0, 0xdf, 0x07, 0xd0, 0x55, 0xc8, 0x50, 0xaf, 0x73, 0x8a, 0x5f, + 0x24, 0xa6, 0x4c, 0x6c, 0x32, 0x1b, 0x5e, 0x07, 0x4b, 0x1d, 0x39, 0xb2, 0xa6, 0x92, 0x50, 0xe5, + 0x5e, 0x21, 0x1e, 0x59, 0x53, 0x19, 0x8b, 0xd3, 0xb2, 0x6a, 0x3a, 0xe0, 0x4e, 0x5b, 0x26, 0x79, + 0x36, 0x76, 0xef, 0x9e, 0x26, 0xe1, 0x90, 0x57, 0xfe, 0xfd, 0x38, 0xcc, 0xd4, 0xd9, 0x3e, 0xb5, + 0xbb, 0xb6, 0x43, 0xd5, 0xba, 0x1e, 0xc0, 0xcc, 0x3e, 0x61, 0x4e, 0xdb, 0xa7, 0x7a, 0x0b, 0xcd, + 0xd6, 0xbd, 0x1b, 0x5a, 0xbd, 0x99, 0x64, 0x3e, 0x3f, 0x2a, 0x2d, 0xa6, 0xd4, 0x53, 0x5c, 0x9c, + 0x46, 0x42, 0x8f, 0x00, 0x68, 0x14, 0x44, 0xb3, 0x93, 0xef, 0x9c, 0xbc, 0x93, 0xe9, 0xc0, 0xeb, + 0xd9, 0x29, 0xa6, 0xe1, 0x04, 0x26, 0xfa, 0xa1, 0x1c, 0xcc, 0x9a, 0x6a, 0x4b, 0x03, 0xf5, 0xb3, + 0xcd, 0xd4, 0x6a, 0xe5, 0x64, 0x03, 0xbb, 0x46, 0x45, 0xc1, 0x47, 0x2d, 0x24, 0xa4, 0xaa, 0x61, + 0xce, 0xfc, 0x59, 0xfe, 0xeb, 0x38, 0x2c, 0x9f, 0x74, 0xdc, 0xca, 0x3e, 0x23, 0x87, 0x45, 0xde, + 0x16, 0x61, 0x13, 0xd6, 0xb7, 0x58, 0xd5, 0x67, 0x76, 0x53, 0x1c, 0xdc, 0x23, 0x89, 0x6e, 0x41, + 0xa6, 0xe5, 0x53, 0x13, 0x9c, 0xea, 0xc9, 0xbe, 0xa7, 0xa2, 0x5f, 0x9b, 0x94, 0x09, 0xb4, 0xed, + 0x53, 0x2c, 0x41, 0x24, 0x96, 0xcb, 0x1a, 0xa6, 0x65, 0x9d, 0x0d, 0x6b, 0x8b, 0x35, 0xb0, 0x04, + 0x41, 0x5b, 0x90, 0x6d, 0xf1, 0x40, 0x98, 0xa9, 0x60, 0x64, 0xb0, 0xbc, 0xac, 0xfa, 0x6d, 0x1e, + 0x08, 0xac, 0x60, 0xca, 0x7f, 0xcb, 0x42, 0xe9, 0x84, 0xb9, 0x01, 0x6d, 0xc2, 0x82, 0xbe, 0x24, + 0x6f, 0x53, 0x9f, 0xf1, 0x46, 0x3a, 0x96, 0x97, 0xd4, 0x25, 0xb6, 0x9f, 0x8d, 0x07, 0xe9, 0xa0, + 0x0f, 0x60, 0x8e, 0x79, 0x82, 
0xfa, 0x1d, 0xe2, 0x84, 0x30, 0xfa, 0x59, 0x60, 0x41, 0xbf, 0xce, + 0xa5, 0x58, 0xb8, 0x57, 0x76, 0xc0, 0x86, 0x66, 0x4e, 0xbd, 0xa1, 0x0e, 0xcc, 0xba, 0xe4, 0x49, + 0xe2, 0xba, 0x6d, 0x42, 0x38, 0xfc, 0xd7, 0x90, 0xb6, 0x60, 0x4e, 0x45, 0xff, 0x60, 0x5a, 0xd9, + 0xf4, 0xc4, 0x1d, 0x7f, 0x47, 0xf8, 0xcc, 0x6b, 0x6a, 0x6b, 0x5b, 0x29, 0x2c, 0xdc, 0x83, 0x8d, + 0x1e, 0x42, 0xde, 0x25, 0x4f, 0x76, 0xda, 0x7e, 0x33, 0xbc, 0x25, 0x8d, 0x6e, 0x47, 0xbd, 0xf9, + 0x6c, 0x19, 0x14, 0x1c, 0xe1, 0x85, 0xa9, 0x39, 0xf9, 0x2a, 0x52, 0x33, 0x4c, 0xa7, 0xfc, 0xab, + 0x49, 0xa7, 0xcf, 0x2c, 0x98, 0x4e, 0x56, 0x71, 0x7f, 0xef, 0xb4, 0x46, 0xe8, 0x9d, 0xdf, 0x86, + 0x71, 0xc1, 0x4d, 0x09, 0x9e, 0xea, 0xa4, 0x07, 0x03, 0x3b, 0xbe, 0xcb, 0xf1, 0xb8, 0xe0, 0xb5, + 0x9b, 0x4f, 0x9f, 0x2d, 0x8d, 0x7d, 0xfe, 0x6c, 0x69, 0xec, 0x8b, 0x67, 0x4b, 0x63, 0x9f, 0x1e, + 0x2f, 0x59, 0x4f, 0x8f, 0x97, 0xac, 0xcf, 0x8f, 0x97, 0xac, 0x2f, 0x8e, 0x97, 0xac, 0x2f, 0x8f, + 0x97, 0xac, 0x5f, 0xfe, 0x77, 0x69, 0xec, 0xe1, 0xf2, 0x49, 0xff, 0x46, 0xf0, 0xff, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x5e, 0x3a, 0xd7, 0x70, 0x69, 0x20, 0x00, 0x00, +} + +func (m *CustomDeploymentStrategyParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomDeploymentStrategyParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomDeploymentStrategyParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Command) > 0 { + for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Environment) > 0 { + for iNdEx := len(m.Environment) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Environment[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentCause) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentCause) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ImageTrigger != nil { + { + size, err := m.ImageTrigger.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentCauseImageTrigger) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentCauseImageTrigger) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + 
return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCauseImageTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentConfigList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentConfigList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentConfigList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentConfigRollback) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentConfigRollback) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentConfigRollback) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.UpdatedAnnotations) > 0 { + keysForUpdatedAnnotations := make([]string, 0, len(m.UpdatedAnnotations)) + for k := range m.UpdatedAnnotations { + keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) + for iNdEx := len(keysForUpdatedAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.UpdatedAnnotations[string(keysForUpdatedAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForUpdatedAnnotations[iNdEx]) + copy(dAtA[i:], keysForUpdatedAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUpdatedAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentConfigRollbackSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentConfigRollbackSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentConfigRollbackSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.IncludeStrategy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + i-- + if m.IncludeReplicationMeta { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + i-- + if m.IncludeTemplate { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + i-- + if m.IncludeTriggers { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + i-- + dAtA[i] = 0x10 + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentConfigSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + 
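
[Editor's aside — illustrative sketch, not part of the vendored patch content. The generated MarshalToSizedBuffer methods above all follow one scheme: the buffer is filled from the end, so each field's length is known before its varint prefix is written, and each one-byte key such as 0x0a, 0x12, or 0x1a is (field_number << 3) | wire_type, with wire type 2 meaning length-delimited. The sketch below mirrors the diff's own encodeVarintGenerated and sovGenerated helpers under hypothetical names (encodeVarint, sov) and marshals a single string field back-to-front; the wire-format facts are standard protobuf, everything else is assumed for illustration.]

package main

import (
	"fmt"
	"math/bits"
)

// sov mirrors the generated sovGenerated: bytes needed to hold x as a varint.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// encodeVarint mirrors the generated encodeVarintGenerated: it writes v
// into dAtA immediately before offset and returns the new start index.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

func main() {
	// Marshal one length-delimited string field (field number 1) back-to-front,
	// exactly as the generated code does for m.Image, m.Name, m.Type, etc.
	buf := make([]byte, 16)
	i := len(buf)
	msg := "hi"
	i -= len(msg)
	copy(buf[i:], msg)                         // payload goes in last
	i = encodeVarint(buf, i, uint64(len(msg))) // then its length prefix
	i--
	buf[i] = 0x0a // key byte: (1 << 3) | 2 = field 1, length-delimited
	fmt.Printf("% x\n", buf[i:]) // prints: 0a 02 68 69
}

[End of editor's aside; the patch hunk resumes below.]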
+func (m *DeploymentConfigSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentConfigSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x48 + if m.Template != nil { + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if len(m.Selector) > 0 { + keysForSelector := make([]string, 0, len(m.Selector)) + for k := range m.Selector { + keysForSelector = append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } + } + i-- + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + i-- + if m.Test { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x20 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x18 + if m.Triggers != nil { + { + size, err := m.Triggers.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentConfigStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentConfigStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentConfigStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x48 + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if m.Details != nil { + { + size, err := m.Details.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + 
dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.LatestVersion)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *DeploymentDetails) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentDetails) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Causes) > 0 { + for iNdEx := len(m.Causes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Causes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentLog) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentLog) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentLog) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *DeploymentLogOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentLogOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentLogOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Version != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Version)) + i-- + dAtA[i] = 0x50 + } + i-- + if m.NoWait { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + if m.LimitBytes != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes)) + i-- + dAtA[i] = 0x40 + } + if m.TailLines != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TailLines)) + i-- + dAtA[i] = 0x38 + } + i-- + if m.Timestamps { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + if m.SinceTime != nil { + { + size, err := m.SinceTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.SinceSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SinceSeconds)) + i-- + dAtA[i] = 0x20 + } + i-- + if m.Previous { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i-- + if m.Follow { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return 
nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ExcludeTriggers) > 0 { + for iNdEx := len(m.ExcludeTriggers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExcludeTriggers[iNdEx]) + copy(dAtA[i:], m.ExcludeTriggers[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExcludeTriggers[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i-- + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i-- + if m.Latest { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ActiveDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) + i-- + dAtA[i] = 0x40 + } + if len(m.Annotations) > 0 { + keysForAnnotations := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keysForAnnotations = append(keysForAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.Annotations[string(keysForAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAnnotations[iNdEx]) + copy(dAtA[i:], keysForAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } + } + if len(m.Labels) > 0 { + keysForLabels := make([]string, 0, len(m.Labels)) + for k := range m.Labels { + keysForLabels = append(keysForLabels, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + for iNdEx := len(keysForLabels) - 1; iNdEx >= 0; iNdEx-- { + v := m.Labels[string(keysForLabels[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForLabels[iNdEx]) + copy(dAtA[i:], keysForLabels[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLabels[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if m.RollingParams != nil { + { + size, err := m.RollingParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.RecreateParams != nil { + { + size, err := m.RecreateParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 
0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.CustomParams != nil { + { + size, err := m.CustomParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentTriggerImageChangeParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentTriggerImageChangeParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentTriggerImageChangeParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.LastTriggeredImage) + copy(dAtA[i:], m.LastTriggeredImage) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LastTriggeredImage))) + i-- + dAtA[i] = 0x22 + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.ContainerNames) > 0 { + for iNdEx := len(m.ContainerNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ContainerNames[iNdEx]) + copy(dAtA[i:], m.ContainerNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerNames[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i-- + if m.Automatic { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m DeploymentTriggerPolicies) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m DeploymentTriggerPolicies) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m DeploymentTriggerPolicies) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m) > 0 { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeploymentTriggerPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentTriggerPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentTriggerPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ImageChangeParams != nil { + { + size, err := m.ImageChangeParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExecNewPodHook) Marshal() (dAtA []byte, err error) { + size 
:= m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecNewPodHook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecNewPodHook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Volumes[iNdEx]) + copy(dAtA[i:], m.Volumes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.ContainerName) + copy(dAtA[i:], m.ContainerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) + i-- + dAtA[i] = 0x1a + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Command) > 0 { + for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LifecycleHook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LifecycleHook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LifecycleHook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TagImages) > 0 { + for iNdEx := len(m.TagImages) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TagImages[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.ExecNewPod != nil { + { + size, err := m.ExecNewPod.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.FailurePolicy) + copy(dAtA[i:], m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FailurePolicy))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RecreateDeploymentStrategyParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RecreateDeploymentStrategyParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RecreateDeploymentStrategyParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Post != nil { + { + size, err := m.Post.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Mid != nil { + { + size, err := m.Mid.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Pre != nil { + { + size, err := 
m.Pre.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *RollingDeploymentStrategyParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingDeploymentStrategyParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingDeploymentStrategyParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Post != nil { + { + size, err := m.Post.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.Pre != nil { + { + size, err := m.Pre.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x18 + } + if m.IntervalSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.IntervalSeconds)) + i-- + dAtA[i] = 0x10 + } + if m.UpdatePeriodSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.UpdatePeriodSeconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *TagImageHook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TagImageHook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TagImageHook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.To.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.ContainerName) + copy(dAtA[i:], m.ContainerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CustomDeploymentStrategyParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Environment) > 0 { + for _, e := range m.Environment { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Command) > 0 { + for _, s := range 
m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeploymentCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.ImageTrigger != nil { + l = m.ImageTrigger.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *DeploymentCauseImageTrigger) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentConfigList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeploymentConfigRollback) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.UpdatedAnnotations) > 0 { + for k, v := range m.UpdatedAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentConfigRollbackSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Revision)) + n += 2 + n += 2 + n += 2 + n += 2 + return n +} + +func (m *DeploymentConfigSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Strategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Triggers != nil { + l = m.Triggers.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.Replicas)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + n += 2 + n += 2 + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Template != nil { + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + return n +} + +func (m *DeploymentConfigStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.LatestVersion)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + 
sovGenerated(uint64(m.AvailableReplicas)) + n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) + if m.Details != nil { + l = m.Details.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + return n +} + +func (m *DeploymentDetails) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Causes) > 0 { + for _, e := range m.Causes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeploymentLog) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *DeploymentLogOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + if m.SinceSeconds != nil { + n += 1 + sovGenerated(uint64(*m.SinceSeconds)) + } + if m.SinceTime != nil { + l = m.SinceTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if m.TailLines != nil { + n += 1 + sovGenerated(uint64(*m.TailLines)) + } + if m.LimitBytes != nil { + n += 1 + sovGenerated(uint64(*m.LimitBytes)) + } + n += 2 + if m.Version != nil { + n += 1 + sovGenerated(uint64(*m.Version)) + } + return n +} + +func (m *DeploymentRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + if len(m.ExcludeTriggers) > 0 { + for _, s := range m.ExcludeTriggers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeploymentStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.CustomParams != nil { + l = m.CustomParams.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RecreateParams != nil { + l = m.RecreateParams.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RollingParams != nil { + l = m.RollingParams.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.ActiveDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) + } + return n +} + +func (m *DeploymentTriggerImageChangeParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + if len(m.ContainerNames) > 0 { + for _, s := range m.ContainerNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.LastTriggeredImage) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m DeploymentTriggerPolicies) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m) > 0 { + for _, e := range m { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeploymentTriggerPolicy) Size() (n int) { + if m == nil { + 
return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.ImageChangeParams != nil { + l = m.ImageChangeParams.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ExecNewPodHook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ContainerName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Volumes) > 0 { + for _, s := range m.Volumes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LifecycleHook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.ExecNewPod != nil { + l = m.ExecNewPod.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.TagImages) > 0 { + for _, e := range m.TagImages { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RecreateDeploymentStrategyParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if m.Pre != nil { + l = m.Pre.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Mid != nil { + l = m.Mid.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Post != nil { + l = m.Post.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RollingDeploymentStrategyParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UpdatePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.UpdatePeriodSeconds)) + } + if m.IntervalSeconds != nil { + n += 1 + sovGenerated(uint64(*m.IntervalSeconds)) + } + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pre != nil { + l = m.Pre.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Post != nil { + l = m.Post.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *TagImageHook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.To.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CustomDeploymentStrategyParams) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnvironment := "[]EnvVar{" + for _, f := range this.Environment { + repeatedStringForEnvironment += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnvironment += "}" + s := strings.Join([]string{`&CustomDeploymentStrategyParams{`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Environment:` + repeatedStringForEnvironment + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentCause{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `ImageTrigger:` + 
strings.Replace(this.ImageTrigger.String(), "DeploymentCauseImageTrigger", "DeploymentCauseImageTrigger", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentCauseImageTrigger) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentCauseImageTrigger{`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v11.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentConfig{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentConfigSpec", "DeploymentConfigSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentConfigStatus", "DeploymentConfigStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentConfigList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]DeploymentConfig{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DeploymentConfig", "DeploymentConfig", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&DeploymentConfigList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentConfigRollback) String() string { + if this == nil { + return "nil" + } + keysForUpdatedAnnotations := make([]string, 0, len(this.UpdatedAnnotations)) + for k := range this.UpdatedAnnotations { + keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) + mapStringForUpdatedAnnotations := "map[string]string{" + for _, k := range keysForUpdatedAnnotations { + mapStringForUpdatedAnnotations += fmt.Sprintf("%v: %v,", k, this.UpdatedAnnotations[k]) + } + mapStringForUpdatedAnnotations += "}" + s := strings.Join([]string{`&DeploymentConfigRollback{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UpdatedAnnotations:` + mapStringForUpdatedAnnotations + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentConfigRollbackSpec", "DeploymentConfigRollbackSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentConfigRollbackSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentConfigRollbackSpec{`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v1.ObjectReference", 
1), `&`, ``, 1) + `,`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `IncludeTriggers:` + fmt.Sprintf("%v", this.IncludeTriggers) + `,`, + `IncludeTemplate:` + fmt.Sprintf("%v", this.IncludeTemplate) + `,`, + `IncludeReplicationMeta:` + fmt.Sprintf("%v", this.IncludeReplicationMeta) + `,`, + `IncludeStrategy:` + fmt.Sprintf("%v", this.IncludeStrategy) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentConfigSpec) String() string { + if this == nil { + return "nil" + } + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&DeploymentConfigSpec{`, + `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, + `Triggers:` + strings.Replace(fmt.Sprintf("%v", this.Triggers), "DeploymentTriggerPolicies", "DeploymentTriggerPolicies", 1) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `Test:` + fmt.Sprintf("%v", this.Test) + `,`, + `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, + `Selector:` + mapStringForSelector + `,`, + `Template:` + strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v1.PodTemplateSpec", 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentConfigStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&DeploymentConfigStatus{`, + `LatestVersion:` + fmt.Sprintf("%v", this.LatestVersion) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, + `Details:` + strings.Replace(this.Details.String(), "DeploymentDetails", "DeploymentDetails", 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentDetails) String() string { + if this == nil { + return "nil" + } + repeatedStringForCauses := "[]DeploymentCause{" + for _, f := range this.Causes { + repeatedStringForCauses += strings.Replace(strings.Replace(f.String(), "DeploymentCause", "DeploymentCause", 1), `&`, ``, 1) + "," + } + repeatedStringForCauses += "}" + s := strings.Join([]string{`&DeploymentDetails{`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Causes:` + repeatedStringForCauses + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentLog) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentLog{`, + `}`, + }, "") + return s +} +func (this *DeploymentLogOptions) String() 
string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentLogOptions{`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, + `Previous:` + fmt.Sprintf("%v", this.Previous) + `,`, + `SinceSeconds:` + valueToStringGenerated(this.SinceSeconds) + `,`, + `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "v11.Time", 1) + `,`, + `Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`, + `TailLines:` + valueToStringGenerated(this.TailLines) + `,`, + `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`, + `NoWait:` + fmt.Sprintf("%v", this.NoWait) + `,`, + `Version:` + valueToStringGenerated(this.Version) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentRequest{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Latest:` + fmt.Sprintf("%v", this.Latest) + `,`, + `Force:` + fmt.Sprintf("%v", this.Force) + `,`, + `ExcludeTriggers:` + fmt.Sprintf("%v", this.ExcludeTriggers) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStrategy) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&DeploymentStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `CustomParams:` + strings.Replace(this.CustomParams.String(), "CustomDeploymentStrategyParams", "CustomDeploymentStrategyParams", 1) + `,`, + `RecreateParams:` + strings.Replace(this.RecreateParams.String(), "RecreateDeploymentStrategyParams", "RecreateDeploymentStrategyParams", 1) + `,`, + `RollingParams:` + strings.Replace(this.RollingParams.String(), "RollingDeploymentStrategyParams", "RollingDeploymentStrategyParams", 1) + `,`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "ResourceRequirements", "v1.ResourceRequirements", 1), `&`, ``, 1) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentTriggerImageChangeParams) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentTriggerImageChangeParams{`, + `Automatic:` + fmt.Sprintf("%v", this.Automatic) + `,`, + `ContainerNames:` + fmt.Sprintf("%v", this.ContainerNames) + `,`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `LastTriggeredImage:` + fmt.Sprintf("%v", this.LastTriggeredImage) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentTriggerPolicy) String() string { + if this 
== nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentTriggerPolicy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `ImageChangeParams:` + strings.Replace(this.ImageChangeParams.String(), "DeploymentTriggerImageChangeParams", "DeploymentTriggerImageChangeParams", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ExecNewPodHook) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + s := strings.Join([]string{`&ExecNewPodHook{`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, + `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, + `}`, + }, "") + return s +} +func (this *LifecycleHook) String() string { + if this == nil { + return "nil" + } + repeatedStringForTagImages := "[]TagImageHook{" + for _, f := range this.TagImages { + repeatedStringForTagImages += strings.Replace(strings.Replace(f.String(), "TagImageHook", "TagImageHook", 1), `&`, ``, 1) + "," + } + repeatedStringForTagImages += "}" + s := strings.Join([]string{`&LifecycleHook{`, + `FailurePolicy:` + fmt.Sprintf("%v", this.FailurePolicy) + `,`, + `ExecNewPod:` + strings.Replace(this.ExecNewPod.String(), "ExecNewPodHook", "ExecNewPodHook", 1) + `,`, + `TagImages:` + repeatedStringForTagImages + `,`, + `}`, + }, "") + return s +} +func (this *RecreateDeploymentStrategyParams) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RecreateDeploymentStrategyParams{`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `Pre:` + strings.Replace(this.Pre.String(), "LifecycleHook", "LifecycleHook", 1) + `,`, + `Mid:` + strings.Replace(this.Mid.String(), "LifecycleHook", "LifecycleHook", 1) + `,`, + `Post:` + strings.Replace(this.Post.String(), "LifecycleHook", "LifecycleHook", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollingDeploymentStrategyParams) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingDeploymentStrategyParams{`, + `UpdatePeriodSeconds:` + valueToStringGenerated(this.UpdatePeriodSeconds) + `,`, + `IntervalSeconds:` + valueToStringGenerated(this.IntervalSeconds) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, + `Pre:` + strings.Replace(this.Pre.String(), "LifecycleHook", "LifecycleHook", 1) + `,`, + `Post:` + strings.Replace(this.Post.String(), "LifecycleHook", "LifecycleHook", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TagImageHook) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TagImageHook{`, + `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, + `To:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.To), "ObjectReference", "v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CustomDeploymentStrategyParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for 
iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomDeploymentStrategyParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomDeploymentStrategyParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Environment", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Environment = append(m.Environment, v1.EnvVar{}) + if err := m.Environment[len(m.Environment)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentCause) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + 
wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentCause: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentTriggerType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageTrigger", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImageTrigger == nil { + m.ImageTrigger = &DeploymentCauseImageTrigger{} + } + if err := m.ImageTrigger.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentCauseImageTrigger) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentCauseImageTrigger: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentCauseImageTrigger: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
[flattened vendor hunk, apps/v1 generated protobuf decoders — summarized: the added lines finish the previous decoder's unknown-field skip (skipGenerated, skippy bounds checks, trailing io.ErrUnexpectedEOF guard, return nil), then define func (m *DeploymentCondition) Unmarshal(dAtA []byte) error: a varint tag loop that rejects end-group wire type 4 and non-positive field numbers, dispatching on field number — 1 Type (DeploymentConditionType string), 2 Status (k8s_io_api_core_v1.ConditionStatus string), 3 LastTransitionTime (embedded message), 4 Reason (string), 5 Message (string), 6 LastUpdateTime (embedded message) — plus the default skip path and epilogue; then func (m *DeploymentConfig) Unmarshal opens the same tag loop with fields 1 ObjectMeta and 2 Spec (embedded messages, continued below).]
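Every decoder in this hunk is built from one primitive: a base-128 varint read, inlined at each tag and length with an explicit 64-bit overflow guard. A minimal standalone sketch of that loop in Go (helper and error names are mine, not from the generated file):

package main

import (
	"errors"
	"fmt"
	"io"
)

// errIntOverflow stands in for the generated ErrIntOverflowGenerated.
var errIntOverflow = errors.New("proto: integer overflow")

// readVarint decodes a protobuf base-128 varint starting at idx:
// each byte contributes its low 7 bits, least-significant group first,
// and a byte with the high bit clear terminates the value.
func readVarint(data []byte, idx int) (v uint64, next int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errIntOverflow // more than 10 bytes: malformed
		}
		if idx >= len(data) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := data[idx]
		idx++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, idx, nil
		}
	}
}

func main() {
	// 300 = 0b1_0010_1100 encodes as 0xAC 0x02.
	v, next, err := readVarint([]byte{0xAC, 0x02}, 0)
	fmt.Println(v, next, err) // 300 2 <nil>

	// A field tag is itself a varint: number<<3 | wiretype, which is
	// why every decoder splits it as wire>>3 and wire&0x7.
	tag, _, _ := readVarint([]byte{0x12}, 0)
	fmt.Println(tag>>3, tag&0x7) // 2 2
}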
[hunk summary, continued: DeploymentConfig finishes with field 3 Status (embedded message) and the default skip/epilogue; func (m *DeploymentConfigList) Unmarshal decodes 1 ListMeta and 2 Items (repeated — append(m.Items, DeploymentConfig{}) then per-element Unmarshal); func (m *DeploymentConfigRollback) Unmarshal decodes 1 Name (string), 2 UpdatedAnnotations (map[string]string via nested key/value entries with their own skip path), 3 Spec (embedded message); func (m *DeploymentConfigRollbackSpec) Unmarshal opens with field 1 From (embedded message).]
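The UpdatedAnnotations block above follows protobuf's map encoding: each map entry arrives as its own length-delimited pseudo-message whose field 1 is the key and field 2 the value, which is why the generated code runs a nested tag loop per entry. A self-contained sketch under that reading (all names illustrative):

package main

import (
	"fmt"
	"io"
)

func uvarint(d []byte, i int) (uint64, int, error) {
	var v uint64
	for s := uint(0); ; s += 7 {
		if s >= 64 || i >= len(d) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := d[i]
		i++
		v |= uint64(b&0x7F) << s
		if b < 0x80 {
			return v, i, nil
		}
	}
}

// readString reads a length-delimited field body: varint length, then bytes.
func readString(d []byte, i int) (string, int, error) {
	n, i, err := uvarint(d, i)
	if err != nil {
		return "", 0, err
	}
	end := i + int(n)
	if end < i || end > len(d) { // same bounds checks the generated code does
		return "", 0, io.ErrUnexpectedEOF
	}
	return string(d[i:end]), end, nil
}

// decodeMapEntry parses one map entry: field 1 = key, field 2 = value,
// both wire type 2 (length-delimited).
func decodeMapEntry(entry []byte) (key, val string, err error) {
	for i := 0; i < len(entry); {
		var tag uint64
		if tag, i, err = uvarint(entry, i); err != nil {
			return "", "", err
		}
		switch tag >> 3 {
		case 1:
			if key, i, err = readString(entry, i); err != nil {
				return "", "", err
			}
		case 2:
			if val, i, err = readString(entry, i); err != nil {
				return "", "", err
			}
		default:
			return "", "", fmt.Errorf("unexpected field %d in map entry", tag>>3)
		}
	}
	return key, val, nil
}

func main() {
	// One entry of {"app": "web"}: 0x0A = field 1 wt 2, 0x12 = field 2 wt 2.
	entry := []byte{0x0A, 0x03, 'a', 'p', 'p', 0x12, 0x03, 'w', 'e', 'b'}
	k, v, err := decodeMapEntry(entry)
	fmt.Println(k, v, err) // app web <nil>
}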
[hunk summary, continued: DeploymentConfigRollbackSpec finishes with 2 Revision (int64 varint) and the bools 3 IncludeTriggers, 4 IncludeTemplate, 5 IncludeReplicationMeta, 6 IncludeStrategy (each bool(v != 0)); func (m *DeploymentConfigSpec) Unmarshal decodes 1 Strategy (embedded), 2 Triggers (allocating DeploymentTriggerPolicies{} when nil), 3 Replicas (int32), 4 RevisionHistoryLimit (*int32 from a varint), 5 Test and 6 Paused (bools), 7 Selector (map[string]string entries), 8 Template (*v1.PodTemplateSpec, lazily allocated), 9 MinReadySeconds (int32); func (m *DeploymentConfigStatus) Unmarshal opens its tag loop.]
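Bools such as Test and Paused and optionals such as RevisionHistoryLimit travel as plain varints; the decoders materialize a bool as v != 0 and an optional scalar as a pointer to the decoded value, so field presence survives decoding. A short sketch of both conventions (struct and helper are mine):

package main

import "fmt"

// decodeUvarint: minimal varint reader (no bounds hardening, demo only).
func decodeUvarint(d []byte, i int) (uint64, int) {
	var v uint64
	for s := uint(0); ; s += 7 {
		b := d[i]
		i++
		v |= uint64(b&0x7F) << s
		if b < 0x80 {
			return v, i
		}
	}
}

type spec struct {
	Test                 bool   // proto3 bool: varint 0 or 1
	RevisionHistoryLimit *int32 // optional: nil means "unset"
}

func main() {
	var s spec

	// Field "Test" = true arrives as varint 1.
	v, _ := decodeUvarint([]byte{0x01}, 0)
	s.Test = v != 0 // the generated code's bool(v != 0)

	// Optional int32 = 10: decode, then take the address so that
	// "was the field on the wire?" is distinguishable from zero.
	v, _ = decodeUvarint([]byte{0x0A}, 0)
	rhl := int32(v)
	s.RevisionHistoryLimit = &rhl

	fmt.Println(s.Test, *s.RevisionHistoryLimit) // true 10
}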
[hunk summary, continued: DeploymentConfigStatus decodes the varint counters 1 LatestVersion, 2 ObservedGeneration (int64) and 3 Replicas, 4 UpdatedReplicas, 5 AvailableReplicas, 6 UnavailableReplicas (int32), then 7 Details (*DeploymentDetails, lazily allocated), 8 Conditions (repeated DeploymentCondition via append-then-Unmarshal), 9 ReadyReplicas (int32); func (m *DeploymentDetails) Unmarshal decodes 1 Message (string) and 2 Causes (repeated DeploymentCause via append-then-Unmarshal).]
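Repeated message fields like Conditions and Causes are decoded one element per occurrence of the field: the generated pattern appends a zero value and unmarshals the length-delimited payload into the new last element, avoiding an extra copy. A sketch with a stand-in element type (the real element types are generated):

package main

import (
	"errors"
	"fmt"
)

// condition is a stand-in message type; its Unmarshal just records the
// raw payload length.
type condition struct{ size int }

func (c *condition) Unmarshal(b []byte) error {
	if b == nil {
		return errors.New("nil payload")
	}
	c.size = len(b)
	return nil
}

// appendDecoded mirrors the generated repeated-field pattern:
//   m.Conditions = append(m.Conditions, DeploymentCondition{})
//   m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex])
func appendDecoded(list []condition, payload []byte) ([]condition, error) {
	list = append(list, condition{})
	if err := list[len(list)-1].Unmarshal(payload); err != nil {
		return nil, err
	}
	return list, nil
}

func main() {
	var conds []condition
	for _, p := range [][]byte{make([]byte, 4), make([]byte, 9)} {
		var err error
		if conds, err = appendDecoded(conds, p); err != nil {
			panic(err)
		}
	}
	fmt.Println(len(conds), conds[0].size, conds[1].size) // 2 4 9
}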
[hunk summary, continued: DeploymentDetails closes with the default skip path and epilogue; the next decoder is short enough to restore to one statement per line:]
+func (m *DeploymentLog) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeploymentLog: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeploymentLog: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
[hunk summary, continued: func (m *DeploymentLogOptions) Unmarshal decodes 1 Container (string), the bools 2 Follow, 3 Previous, 6 Timestamps, 9 NoWait, the optional varints 4 SinceSeconds, 7 TailLines, 8 LimitBytes, 10 Version (each *int64), and 5 SinceTime (*v11.Time, lazily allocated); func (m *DeploymentRequest) Unmarshal opens its tag loop.]
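DeploymentLog declares no proto fields, so its decoder above is purely the unknown-field path: every tag funnels into skipGenerated. A minimal sketch of what such a skip routine must do per wire type (my own reduced version; the real generated helper is more defensive):

package main

import (
	"fmt"
	"io"
)

func uvarint(d []byte, i int) (uint64, int, error) {
	var v uint64
	for s := uint(0); ; s += 7 {
		if s >= 64 || i >= len(d) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := d[i]
		i++
		v |= uint64(b&0x7F) << s
		if b < 0x80 {
			return v, i, nil
		}
	}
}

// skipField returns the index just past one unknown field, given its tag.
// Wire types: 0 varint, 1 fixed64, 2 length-delimited, 5 fixed32.
func skipField(d []byte, i int, wireType int) (int, error) {
	switch wireType {
	case 0: // varint: scan bytes until the high bit clears
		_, i, err := uvarint(d, i)
		return i, err
	case 1: // fixed64
		i += 8
	case 2: // length-delimited: varint length, then that many bytes
		n, j, err := uvarint(d, i)
		if err != nil {
			return 0, err
		}
		i = j + int(n)
		if i < j { // overflow guard, like the generated skippy checks
			return 0, fmt.Errorf("invalid length")
		}
	case 5: // fixed32
		i += 4
	default:
		return 0, fmt.Errorf("unsupported wire type %d", wireType)
	}
	if i > len(d) {
		return 0, io.ErrUnexpectedEOF
	}
	return i, nil
}

func main() {
	// Unknown field 9, wire type 2, length 3: tag 0x4A, then 3 bytes.
	d := []byte{0x4A, 0x03, 1, 2, 3, 0x08}
	tag, i, _ := uvarint(d, 0)
	next, err := skipField(d, i, int(tag&0x7))
	fmt.Println(next, err) // 5 <nil> — decoding can resume at d[5]
}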
[hunk summary, continued: DeploymentRequest decodes 1 Name (string), the bools 2 Latest and 3 Force, and 4 ExcludeTriggers (repeated DeploymentTriggerType strings); func (m *DeploymentStrategy) Unmarshal decodes 1 Type (DeploymentStrategyType string), the lazily allocated params 2 CustomParams (*CustomDeploymentStrategyParams), 3 RecreateParams (*RecreateDeploymentStrategyParams), 4 RollingParams (*RollingDeploymentStrategyParams), then 5 Resources (embedded message), 6 Labels and 7 Annotations (map[string]string entries), 8 ActiveDeadlineSeconds (*int64); func (m *DeploymentTriggerImageChangeParams) Unmarshal opens its tag loop.]
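The recurring postIndex < 0 and (iNdEx+skippy) < 0 guards look redundant next to the > l checks, but they close an integer-overflow hole: a hostile length prefix near MaxInt can wrap the index addition negative, sneaking past the upper-bound test. A small demonstration of the failure mode (values illustrative):

package main

import (
	"fmt"
	"math"
)

func main() {
	idx := 8
	// A hostile varint length can decode to something near MaxInt.
	hostileLen := math.MaxInt - 4

	post := idx + hostileLen // wraps negative (Go ints wrap on overflow)
	fmt.Println(post)        // e.g. -9223372036854775805 on 64-bit

	// Without the sign check, post > len(data) is FALSE for a negative
	// post, and a slice expression data[idx:post] would panic. Hence the
	// generated pair of guards before every slice:
	if post < 0 {
		fmt.Println("reject: ErrInvalidLengthGenerated")
	}
}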
[hunk summary, continued: DeploymentTriggerImageChangeParams decodes 1 Automatic (bool), 2 ContainerNames (repeated string), 3 From (embedded message), 4 LastTriggeredImage (string); func (m *DeploymentTriggerPolicies) Unmarshal appends a DeploymentTriggerPolicy{} per field-1 occurrence and unmarshals into it; func (m *DeploymentTriggerPolicy) Unmarshal decodes 1 Type (DeploymentTriggerType string) and 2 ImageChangeParams (*DeploymentTriggerImageChangeParams, lazily allocated); func (m *ExecNewPodHook) Unmarshal decodes 1 Command and 4 Volumes (repeated strings), 2 Env (repeated v1.EnvVar), 3 ContainerName (string); func (m *LifecycleHook) Unmarshal decodes 1 FailurePolicy (LifecycleHookFailurePolicy string), 2 ExecNewPod (*ExecNewPodHook, lazily allocated), and opens 3 TagImages, where the hunk is cut off mid bounds check.]
return io.ErrUnexpectedEOF + } + m.TagImages = append(m.TagImages, TagImageHook{}) + if err := m.TagImages[len(m.TagImages)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RecreateDeploymentStrategyParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RecreateDeploymentStrategyParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RecreateDeploymentStrategyParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pre", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pre == nil { + m.Pre = &LifecycleHook{} + } + if err := m.Pre.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mid", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Mid == nil { + m.Mid = &LifecycleHook{} + } + if err := m.Mid.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Post", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + 
} + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Post == nil { + m.Post = &LifecycleHook{} + } + if err := m.Post.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingDeploymentStrategyParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingDeploymentStrategyParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingDeploymentStrategyParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatePeriodSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UpdatePeriodSeconds = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntervalSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IntervalSeconds = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxSurge == nil { + m.MaxSurge = &intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pre", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pre == nil { + m.Pre = &LifecycleHook{} + } + if err := m.Pre.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Post", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Post == nil { + m.Post = &LifecycleHook{} + } + if err := m.Post.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TagImageHook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TagImageHook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TagImageHook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 
0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/openshift/api/apps/v1/generated.proto b/vendor/github.com/openshift/api/apps/v1/generated.proto new file mode 100644 index 000000000..ddf28f600 --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/generated.proto @@ -0,0 +1,490 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package github.com.openshift.api.apps.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". 
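Every generated Unmarshal method above repeats the same hand-rolled wire-format loop: read a varint one byte at a time (seven payload bits per byte, with the high bit as a continuation flag), then split the resulting tag into a field number (wire >> 3) and a wire type (wire & 0x7). The following is a minimal, self-contained Go sketch of that loop for illustration only; it is not part of the vendored code.

package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the inner loop of the generated Unmarshal methods:
// each byte contributes its low seven bits; a clear high bit ends the value.
func decodeVarint(data []byte) (value uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows uint64") // ErrIntOverflowGenerated in the vendored code
		}
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF") // io.ErrUnexpectedEOF in the vendored code
		}
		b := data[n]
		n++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return value, n, nil
		}
	}
}

func main() {
	// 0x1A encodes tag (3<<3)|2: field 3, wire type 2 (length-delimited),
	// e.g. the From field of DeploymentTriggerImageChangeParams.
	wire, _, _ := decodeVarint([]byte{0x1A})
	fmt.Printf("fieldNum=%d wireType=%d\n", wire>>3, wire&0x7) // fieldNum=3 wireType=2
}

Wire type 2 (length-delimited) is why the string and message cases above read a second varint for the length before slicing dAtA.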
+option go_package = "github.com/openshift/api/apps/v1"; + +// CustomDeploymentStrategyParams are the input to the Custom deployment strategy. +message CustomDeploymentStrategyParams { + // Image specifies a container image which can carry out a deployment. + optional string image = 1; + + // Environment holds the environment which will be given to the container for Image. + repeated k8s.io.api.core.v1.EnvVar environment = 2; + + // Command is optional and overrides CMD in the container Image. + repeated string command = 3; +} + +// DeploymentCause captures information about a particular cause of a deployment. +message DeploymentCause { + // Type of the trigger that resulted in the creation of a new deployment + optional string type = 1; + + // ImageTrigger contains the image trigger details, if this trigger was fired based on an image change + optional DeploymentCauseImageTrigger imageTrigger = 2; +} + +// DeploymentCauseImageTrigger represents details about the cause of a deployment originating +// from an image change trigger +message DeploymentCauseImageTrigger { + // From is a reference to the changed object which triggered a deployment. The field may have + // the kinds DockerImage, ImageStreamTag, or ImageStreamImage. + optional k8s.io.api.core.v1.ObjectReference from = 1; +} + +// DeploymentCondition describes the state of a deployment config at a certain point. +message DeploymentCondition { + // Type of deployment condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time this condition was updated. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + + // The last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + optional string reason = 4; + + // A human readable message indicating details about the transition. + optional string message = 5; +} + +// Deployment Configs define the template for a pod and manages deploying new images or configuration changes. +// A single deployment configuration is usually analogous to a single micro-service. Can support many different +// deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as +// well as pre- and post- deployment hooks. Each individual deployment is represented as a replication controller. +// +// A deployment is "triggered" when its configuration is changed or a tag in an Image Stream is changed. +// Triggers can be disabled to allow manual control over a deployment. The "strategy" determines how the deployment +// is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment +// is triggered by any means. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// Deprecated: Use deployments or other means for declarative updates for pods instead. +// +openshift:compatibility-gen:level=1 +message DeploymentConfig { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec represents a desired deployment state and how to deploy to it. 
+ optional DeploymentConfigSpec spec = 2; + + // Status represents the current deployment state. + // +optional + optional DeploymentConfigStatus status = 3; +} + +// DeploymentConfigList is a collection of deployment configs. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message DeploymentConfigList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of deployment configs + repeated DeploymentConfig items = 2; +} + +// DeploymentConfigRollback provides the input to rollback generation. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message DeploymentConfigRollback { + // Name of the deployment config that will be rolled back. + optional string name = 1; + + // UpdatedAnnotations is a set of new annotations that will be added in the deployment config. + map<string, string> updatedAnnotations = 2; + + // Spec defines the options to rollback generation. + optional DeploymentConfigRollbackSpec spec = 3; +} + +// DeploymentConfigRollbackSpec represents the options for rollback generation. +message DeploymentConfigRollbackSpec { + // From points to a ReplicationController which is a deployment. + optional k8s.io.api.core.v1.ObjectReference from = 1; + + // Revision to rollback to. If set to 0, rollback to the last revision. + optional int64 revision = 2; + + // IncludeTriggers specifies whether to include config Triggers. + optional bool includeTriggers = 3; + + // IncludeTemplate specifies whether to include the PodTemplateSpec. + optional bool includeTemplate = 4; + + // IncludeReplicationMeta specifies whether to include the replica count and selector. + optional bool includeReplicationMeta = 5; + + // IncludeStrategy specifies whether to include the deployment Strategy. + optional bool includeStrategy = 6; +} + +// DeploymentConfigSpec represents the desired state of the deployment. +message DeploymentConfigSpec { + // Strategy describes how a deployment is executed. + // +optional + optional DeploymentStrategy strategy = 1; + + // MinReadySeconds is the minimum number of seconds for which a newly created pod should + // be ready without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + optional int32 minReadySeconds = 9; + + // Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers + // are defined, a new deployment can only occur as a result of an explicit client update to the + // DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger. + // +optional + optional DeploymentTriggerPolicies triggers = 2; + + // Replicas is the number of desired replicas. + // +optional + optional int32 replicas = 3; + + // RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. + // This field is a pointer to allow for differentiation between an explicit zero and not specified. + // Defaults to 10. (This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.)
+ optional int32 revisionHistoryLimit = 4; + + // Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the + // deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding + // or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action. + // +optional + optional bool test = 5; + + // Paused indicates that the deployment config is paused resulting in no new deployments on template + // changes or changes in the template caused by other triggers. + optional bool paused = 6; + + // Selector is a label query over pods that should match the Replicas count. + map<string, string> selector = 7; + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. + optional k8s.io.api.core.v1.PodTemplateSpec template = 8; +} + +// DeploymentConfigStatus represents the current deployment state. +message DeploymentConfigStatus { + // LatestVersion is used to determine whether the current deployment associated with a deployment + // config is out of sync. + optional int64 latestVersion = 1; + + // ObservedGeneration is the most recent generation observed by the deployment config controller. + optional int64 observedGeneration = 2; + + // Replicas is the total number of pods targeted by this deployment config. + optional int32 replicas = 3; + + // UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config + // that have the desired template spec. + optional int32 updatedReplicas = 4; + + // AvailableReplicas is the total number of available pods targeted by this deployment config. + optional int32 availableReplicas = 5; + + // UnavailableReplicas is the total number of unavailable pods targeted by this deployment config. + optional int32 unavailableReplicas = 6; + + // Details are the reasons for the update to this deployment config. + // This could be based on a change made by the user or caused by an automatic trigger + optional DeploymentDetails details = 7; + + // Conditions represents the latest available observations of a deployment config's current state. + // +patchMergeKey=type + // +patchStrategy=merge + repeated DeploymentCondition conditions = 8; + + // Total number of ready pods targeted by this deployment. + optional int32 readyReplicas = 9; +} + +// DeploymentDetails captures information about the causes of a deployment. +message DeploymentDetails { + // Message is the user specified change message, if this deployment was triggered manually by the user + optional string message = 1; + + // Causes are extended data associated with all the causes for creating a new deployment + repeated DeploymentCause causes = 2; +} + +// DeploymentLog represents the logs for a deployment +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message DeploymentLog { +} + +// DeploymentLogOptions is the REST options for a deployment log +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message DeploymentLogOptions { + // The container for which to stream logs. Defaults to only container if there is one container in the pod.
+ optional string container = 1; + + // Follow if true indicates that the build log should be streamed until + // the build terminates. + optional bool follow = 2; + + // Return previous deployment logs. Defaults to false. + optional bool previous = 3; + + // A relative time in seconds before the current time from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + optional int64 sinceSeconds = 4; + + // An RFC3339 timestamp from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5; + + // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. Defaults to false. + optional bool timestamps = 6; + + // If set, the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + optional int64 tailLines = 7; + + // If set, the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. + optional int64 limitBytes = 8; + + // NoWait if true causes the call to return immediately even if the deployment + // is not available yet. Otherwise the server will wait until the deployment has started. + // TODO: Fix the tag to 'noWait' in v2 + optional bool nowait = 9; + + // Version of the deployment for which to view logs. + optional int64 version = 10; +} + +// DeploymentRequest is a request to a deployment config for a new deployment. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message DeploymentRequest { + // Name of the deployment config for requesting a new deployment. + optional string name = 1; + + // Latest will update the deployment config with the latest state from all triggers. + optional bool latest = 2; + + // Force will try to force a new deployment to run. If the deployment config is paused, + // then setting this to true will return an Invalid error. + optional bool force = 3; + + // ExcludeTriggers instructs the instantiator to avoid processing the specified triggers. + // This field overrides the triggers from latest and allows clients to control specific + // logic. This field is ignored if not specified. + repeated string excludeTriggers = 4; +} + +// DeploymentStrategy describes how to perform a deployment. +message DeploymentStrategy { + // Type is the name of a deployment strategy. + // +optional + optional string type = 1; + + // CustomParams are the input to the Custom deployment strategy, and may also + // be specified for the Recreate and Rolling strategies to customize the execution + // process that runs the deployment. + optional CustomDeploymentStrategyParams customParams = 2; + + // RecreateParams are the input to the Recreate deployment strategy. + optional RecreateDeploymentStrategyParams recreateParams = 3; + + // RollingParams are the input to the Rolling deployment strategy. 
+ optional RollingDeploymentStrategyParams rollingParams = 4; + + // Resources contains resource requirements to execute the deployment and any hooks. + optional k8s.io.api.core.v1.ResourceRequirements resources = 5; + + // Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. + map<string, string> labels = 6; + + // Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. + map<string, string> annotations = 7; + + // ActiveDeadlineSeconds is the duration in seconds that the deployer pods for this deployment + // config may be active on a node before the system actively tries to terminate them. + optional int64 activeDeadlineSeconds = 8; +} + +// DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger. +message DeploymentTriggerImageChangeParams { + // Automatic means that the detection of a new tag value should result in an image update + // inside the pod template. + optional bool automatic = 1; + + // ContainerNames is used to restrict tag updates to the specified set of container names in a pod. + // If multiple triggers point to the same containers, the resulting behavior is undefined. Future + // API versions will make this a validation error. If ContainerNames does not point to a valid container, + // the trigger will be ignored. Future API versions will make this a validation error. + repeated string containerNames = 2; + + // From is a reference to an image stream tag to watch for changes. From.Name is the only + // required subfield - if From.Namespace is blank, the namespace of the current deployment + // trigger will be used. + optional k8s.io.api.core.v1.ObjectReference from = 3; + + // LastTriggeredImage is the last image to be triggered. + optional string lastTriggeredImage = 4; +} + +// DeploymentTriggerPolicies is a list of policies where nil values are different from empty arrays. +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message DeploymentTriggerPolicies { + // items, if empty, will result in an empty slice + + repeated DeploymentTriggerPolicy items = 1; +} + +// DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment. +message DeploymentTriggerPolicy { + // Type of the trigger + optional string type = 1; + + // ImageChangeParams represents the parameters for the ImageChange trigger. + optional DeploymentTriggerImageChangeParams imageChangeParams = 2; +} + +// ExecNewPodHook is a hook implementation which runs a command in a new pod +// based on the specified container which is assumed to be part of the +// deployment template. +message ExecNewPodHook { + // Command is the action command and its arguments. + repeated string command = 1; + + // Env is a set of environment variables to supply to the hook pod's container. + repeated k8s.io.api.core.v1.EnvVar env = 2; + + // ContainerName is the name of a container in the deployment pod template + // whose container image will be used for the hook pod's container. + optional string containerName = 3; + + // Volumes is a list of named volumes from the pod template which should be + // copied to the hook pod. Volume names not found in pod spec are ignored. + // An empty list means no volumes will be copied. + repeated string volumes = 4; +} + +// LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time. +message LifecycleHook { + // FailurePolicy specifies what action to take if the hook fails.
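In practice the hook messages above compose with the strategy params that follow. A hedged sketch using the Go types this patch vendors in types.go further down; the command and container name are hypothetical, chosen only for illustration. It shows the "only one type of action" rule: the hook sets ExecNewPod and nothing else, and Retry keeps re-running the hook until it succeeds.

package main

import (
	"fmt"

	appsv1 "github.com/openshift/api/apps/v1"
)

func main() {
	// Recreate strategy with a pre hook: before old pods are replaced, run
	// a (hypothetical) migration command in a fresh pod cloned from the
	// "app" container, retrying on failure.
	strategy := appsv1.DeploymentStrategy{
		Type: appsv1.DeploymentStrategyTypeRecreate,
		RecreateParams: &appsv1.RecreateDeploymentStrategyParams{
			Pre: &appsv1.LifecycleHook{
				FailurePolicy: appsv1.LifecycleHookFailurePolicyRetry,
				ExecNewPod: &appsv1.ExecNewPodHook{
					Command:       []string{"/bin/sh", "-c", "migrate-db"},
					ContainerName: "app",
				},
			},
		},
	}
	fmt.Printf("%+v\n", strategy)
}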
+ optional string failurePolicy = 1; + + // ExecNewPod specifies the options for a lifecycle hook backed by a pod. + optional ExecNewPodHook execNewPod = 2; + + // TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag. + repeated TagImageHook tagImages = 3; +} + +// RecreateDeploymentStrategyParams are the input to the Recreate deployment +// strategy. +message RecreateDeploymentStrategyParams { + // TimeoutSeconds is the time to wait for updates before giving up. If the + // value is nil, a default will be used. + optional int64 timeoutSeconds = 1; + + // Pre is a lifecycle hook which is executed before the strategy manipulates + // the deployment. All LifecycleHookFailurePolicy values are supported. + optional LifecycleHook pre = 2; + + // Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new + // pod is created. All LifecycleHookFailurePolicy values are supported. + optional LifecycleHook mid = 3; + + // Post is a lifecycle hook which is executed after the strategy has + // finished all deployment logic. All LifecycleHookFailurePolicy values are supported. + optional LifecycleHook post = 4; +} + +// RollingDeploymentStrategyParams are the input to the Rolling deployment +// strategy. +message RollingDeploymentStrategyParams { + // UpdatePeriodSeconds is the time to wait between individual pod updates. + // If the value is nil, a default will be used. + optional int64 updatePeriodSeconds = 1; + + // IntervalSeconds is the time to wait between polling deployment status + // after update. If the value is nil, a default will be used. + optional int64 intervalSeconds = 2; + + // TimeoutSeconds is the time to wait for updates before giving up. If the + // value is nil, a default will be used. + optional int64 timeoutSeconds = 3; + + // MaxUnavailable is the maximum number of pods that can be unavailable + // during the update. Value can be an absolute number (ex: 5) or a + // percentage of total pods at the start of update (ex: 10%). Absolute + // number is calculated from percentage by rounding down. + // + // This cannot be 0 if MaxSurge is 0. By default, 25% is used. + // + // Example: when this is set to 30%, the old RC can be scaled down by 30% + // immediately when the rolling update starts. Once new pods are ready, old + // RC can be scaled down further, followed by scaling up the new RC, + // ensuring that at least 70% of original number of pods are available at + // all times during the update. + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 4; + + // MaxSurge is the maximum number of pods that can be scheduled above the + // original number of pods. Value can be an absolute number (ex: 5) or a + // percentage of total pods at the start of the update (ex: 10%). Absolute + // number is calculated from percentage by rounding up. + // + // This cannot be 0 if MaxUnavailable is 0. By default, 25% is used. + // + // Example: when this is set to 30%, the new RC can be scaled up by 30% + // immediately when the rolling update starts. Once old pods have been + // killed, new RC can be scaled up further, ensuring that total number of + // pods running at any time during the update is at most 130% of original + // pods. + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 5; + + // Pre is a lifecycle hook which is executed before the deployment process + // begins. All LifecycleHookFailurePolicy values are supported.
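The rounding rules documented just above for maxUnavailable (round down) and maxSurge (round up) can be checked with the vendored apimachinery helpers. A small sketch, assuming intstr.GetScaledValueFromIntOrPercent as it exists in the vendored k8s.io/apimachinery: with 10 replicas and the 25% defaults, the rollout may drop to 8 pods and peak at 13.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	replicas := 10
	maxUnavailable := intstr.FromString("25%")
	maxSurge := intstr.FromString("25%")

	// MaxUnavailable rounds down, MaxSurge rounds up, matching the field docs.
	down, _ := intstr.GetScaledValueFromIntOrPercent(&maxUnavailable, replicas, false) // 2
	up, _ := intstr.GetScaledValueFromIntOrPercent(&maxSurge, replicas, true)          // 3
	fmt.Println(replicas-down, replicas+up)                                            // 8 13: the rollout's floor and peak pod counts
}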
+ optional LifecycleHook pre = 7; + + // Post is a lifecycle hook which is executed after the strategy has + // finished all deployment logic. All LifecycleHookFailurePolicy values + // are supported. + optional LifecycleHook post = 8; +} + +// TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag. +message TagImageHook { + // ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single + // container this value will be defaulted to the name of that container. + optional string containerName = 1; + + // To is the target ImageStreamTag to set the container's image onto. + optional k8s.io.api.core.v1.ObjectReference to = 2; +} + diff --git a/vendor/github.com/openshift/api/apps/v1/legacy.go b/vendor/github.com/openshift/api/apps/v1/legacy.go new file mode 100644 index 000000000..c8fa0ed99 --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/legacy.go @@ -0,0 +1,28 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme, extensionsv1beta1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &DeploymentConfig{}, + &DeploymentConfigList{}, + &DeploymentConfigRollback{}, + &DeploymentRequest{}, + &DeploymentLog{}, + &DeploymentLogOptions{}, + &extensionsv1beta1.Scale{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) + return nil +} diff --git a/vendor/github.com/openshift/api/apps/v1/register.go b/vendor/github.com/openshift/api/apps/v1/register.go new file mode 100644 index 000000000..0c1e47e6d --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/register.go @@ -0,0 +1,45 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "apps.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme, extensionsv1beta1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. 
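register.go above exposes Install as the supported entry point for wiring these kinds into a scheme. A minimal usage sketch, assuming only the vendored openshift/api and apimachinery modules:

package main

import (
	"fmt"

	appsv1 "github.com/openshift/api/apps/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Install adds the apps.openshift.io/v1 kinds (plus the corev1 and
	// extensions/v1beta1 types its scheme builder pulls in) to a scheme.
	scheme := runtime.NewScheme()
	if err := appsv1.Install(scheme); err != nil {
		panic(err)
	}
	fmt.Println(scheme.Recognizes(appsv1.GroupVersion.WithKind("DeploymentConfig"))) // true
}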
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &DeploymentConfig{}, + &DeploymentConfigList{}, + &DeploymentConfigRollback{}, + &DeploymentRequest{}, + &DeploymentLog{}, + &DeploymentLogOptions{}, + &extensionsv1beta1.Scale{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/apps/v1/types.go b/vendor/github.com/openshift/api/apps/v1/types.go new file mode 100644 index 000000000..1465aea27 --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/types.go @@ -0,0 +1,537 @@ +package v1 + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// +genclient +// +genclient:method=Instantiate,verb=create,subresource=instantiate,input=DeploymentRequest +// +genclient:method=Rollback,verb=create,subresource=rollback,input=DeploymentConfigRollback +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/extensions/v1beta1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/extensions/v1beta1.Scale,result=k8s.io/api/extensions/v1beta1.Scale +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=3.0 +// +k8s:prerelease-lifecycle-gen:deprecated=4.14 +// +k8s:prerelease-lifecycle-gen:removed=4.10000 + +// Deployment Configs define the template for a pod and manages deploying new images or configuration changes. +// A single deployment configuration is usually analogous to a single micro-service. Can support many different +// deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as +// well as pre- and post- deployment hooks. Each individual deployment is represented as a replication controller. +// +// A deployment is "triggered" when its configuration is changed or a tag in an Image Stream is changed. +// Triggers can be disabled to allow manual control over a deployment. The "strategy" determines how the deployment +// is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment +// is triggered by any means. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// Deprecated: Use deployments or other means for declarative updates for pods instead. +// +openshift:compatibility-gen:level=1 +type DeploymentConfig struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec represents a desired deployment state and how to deploy to it. + Spec DeploymentConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status represents the current deployment state. + // +optional + Status DeploymentConfigStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// DeploymentConfigSpec represents the desired state of the deployment. +type DeploymentConfigSpec struct { + // Strategy describes how a deployment is executed. 
+ // +optional + Strategy DeploymentStrategy `json:"strategy" protobuf:"bytes,1,opt,name=strategy"` + + // MinReadySeconds is the minimum number of seconds for which a newly created pod should + // be ready without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,9,opt,name=minReadySeconds"` + + // Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers + // are defined, a new deployment can only occur as a result of an explicit client update to the + // DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger. + // +optional + Triggers DeploymentTriggerPolicies `json:"triggers" protobuf:"bytes,2,rep,name=triggers"` + + // Replicas is the number of desired replicas. + // +optional + Replicas int32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"` + + // RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. + // This field is a pointer to allow for differentiation between an explicit zero and not specified. + // Defaults to 10. (This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.) + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,4,opt,name=revisionHistoryLimit"` + + // Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the + // deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding + // or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action. + // +optional + Test bool `json:"test" protobuf:"varint,5,opt,name=test"` + + // Paused indicates that the deployment config is paused resulting in no new deployments on template + // changes or changes in the template caused by other triggers. + Paused bool `json:"paused,omitempty" protobuf:"varint,6,opt,name=paused"` + + // Selector is a label query over pods that should match the Replicas count. + Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,7,rep,name=selector"` + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. + Template *corev1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,8,opt,name=template"` +} + +// DeploymentStrategy describes how to perform a deployment. +type DeploymentStrategy struct { + // Type is the name of a deployment strategy. + // +optional + Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"` + + // CustomParams are the input to the Custom deployment strategy, and may also + // be specified for the Recreate and Rolling strategies to customize the execution + // process that runs the deployment. + CustomParams *CustomDeploymentStrategyParams `json:"customParams,omitempty" protobuf:"bytes,2,opt,name=customParams"` + // RecreateParams are the input to the Recreate deployment strategy. + RecreateParams *RecreateDeploymentStrategyParams `json:"recreateParams,omitempty" protobuf:"bytes,3,opt,name=recreateParams"` + // RollingParams are the input to the Rolling deployment strategy. 
+ RollingParams *RollingDeploymentStrategyParams `json:"rollingParams,omitempty" protobuf:"bytes,4,opt,name=rollingParams"` + + // Resources contains resource requirements to execute the deployment and any hooks. + Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,5,opt,name=resources"` + // Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. + Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,6,rep,name=labels"` + // Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods. + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,7,rep,name=annotations"` + + // ActiveDeadlineSeconds is the duration in seconds that the deployer pods for this deployment + // config may be active on a node before the system actively tries to terminate them. + ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,8,opt,name=activeDeadlineSeconds"` +} + +// DeploymentStrategyType refers to a specific DeploymentStrategy implementation. +type DeploymentStrategyType string + +const ( + // DeploymentStrategyTypeRecreate is a simple strategy suitable as a default. + DeploymentStrategyTypeRecreate DeploymentStrategyType = "Recreate" + // DeploymentStrategyTypeCustom is a user defined strategy. + DeploymentStrategyTypeCustom DeploymentStrategyType = "Custom" + // DeploymentStrategyTypeRolling uses the Kubernetes RollingUpdater. + DeploymentStrategyTypeRolling DeploymentStrategyType = "Rolling" +) + +// CustomDeploymentStrategyParams are the input to the Custom deployment strategy. +type CustomDeploymentStrategyParams struct { + // Image specifies a container image which can carry out a deployment. + Image string `json:"image,omitempty" protobuf:"bytes,1,opt,name=image"` + // Environment holds the environment which will be given to the container for Image. + Environment []corev1.EnvVar `json:"environment,omitempty" protobuf:"bytes,2,rep,name=environment"` + // Command is optional and overrides CMD in the container Image. + Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` +} + +// RecreateDeploymentStrategyParams are the input to the Recreate deployment +// strategy. +type RecreateDeploymentStrategyParams struct { + // TimeoutSeconds is the time to wait for updates before giving up. If the + // value is nil, a default will be used. + TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,1,opt,name=timeoutSeconds"` + // Pre is a lifecycle hook which is executed before the strategy manipulates + // the deployment. All LifecycleHookFailurePolicy values are supported. + Pre *LifecycleHook `json:"pre,omitempty" protobuf:"bytes,2,opt,name=pre"` + // Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new + // pod is created. All LifecycleHookFailurePolicy values are supported. + Mid *LifecycleHook `json:"mid,omitempty" protobuf:"bytes,3,opt,name=mid"` + // Post is a lifecycle hook which is executed after the strategy has + // finished all deployment logic. All LifecycleHookFailurePolicy values are supported. + Post *LifecycleHook `json:"post,omitempty" protobuf:"bytes,4,opt,name=post"` +} + +// RollingDeploymentStrategyParams are the input to the Rolling deployment +// strategy. +type RollingDeploymentStrategyParams struct { + // UpdatePeriodSeconds is the time to wait between individual pod updates. 
+ // If the value is nil, a default will be used. + UpdatePeriodSeconds *int64 `json:"updatePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=updatePeriodSeconds"` + // IntervalSeconds is the time to wait between polling deployment status + // after update. If the value is nil, a default will be used. + IntervalSeconds *int64 `json:"intervalSeconds,omitempty" protobuf:"varint,2,opt,name=intervalSeconds"` + // TimeoutSeconds is the time to wait for updates before giving up. If the + // value is nil, a default will be used. + TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"` + // MaxUnavailable is the maximum number of pods that can be unavailable + // during the update. Value can be an absolute number (ex: 5) or a + // percentage of total pods at the start of update (ex: 10%). Absolute + // number is calculated from percentage by rounding down. + // + // This cannot be 0 if MaxSurge is 0. By default, 25% is used. + // + // Example: when this is set to 30%, the old RC can be scaled down by 30% + // immediately when the rolling update starts. Once new pods are ready, old + // RC can be scaled down further, followed by scaling up the new RC, + // ensuring that at least 70% of original number of pods are available at + // all times during the update. + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,4,opt,name=maxUnavailable"` + // MaxSurge is the maximum number of pods that can be scheduled above the + // original number of pods. Value can be an absolute number (ex: 5) or a + // percentage of total pods at the start of the update (ex: 10%). Absolute + // number is calculated from percentage by rounding up. + // + // This cannot be 0 if MaxUnavailable is 0. By default, 25% is used. + // + // Example: when this is set to 30%, the new RC can be scaled up by 30% + // immediately when the rolling update starts. Once old pods have been + // killed, new RC can be scaled up further, ensuring that total number of + // pods running at any time during the update is at most 130% of original + // pods. + MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,5,opt,name=maxSurge"` + // Pre is a lifecycle hook which is executed before the deployment process + // begins. All LifecycleHookFailurePolicy values are supported. + Pre *LifecycleHook `json:"pre,omitempty" protobuf:"bytes,7,opt,name=pre"` + // Post is a lifecycle hook which is executed after the strategy has + // finished all deployment logic. All LifecycleHookFailurePolicy values + // are supported. + Post *LifecycleHook `json:"post,omitempty" protobuf:"bytes,8,opt,name=post"` +} + +// LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time. +type LifecycleHook struct { + // FailurePolicy specifies what action to take if the hook fails. + FailurePolicy LifecycleHookFailurePolicy `json:"failurePolicy" protobuf:"bytes,1,opt,name=failurePolicy,casttype=LifecycleHookFailurePolicy"` + + // ExecNewPod specifies the options for a lifecycle hook backed by a pod. + ExecNewPod *ExecNewPodHook `json:"execNewPod,omitempty" protobuf:"bytes,2,opt,name=execNewPod"` + + // TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag. + TagImages []TagImageHook `json:"tagImages,omitempty" protobuf:"bytes,3,rep,name=tagImages"` +} + +// LifecycleHookFailurePolicy describes possible actions to take if a hook fails.
+type LifecycleHookFailurePolicy string
+
+const (
+ // LifecycleHookFailurePolicyRetry means retry the hook until it succeeds.
+ LifecycleHookFailurePolicyRetry LifecycleHookFailurePolicy = "Retry"
+ // LifecycleHookFailurePolicyAbort means abort the deployment.
+ LifecycleHookFailurePolicyAbort LifecycleHookFailurePolicy = "Abort"
+ // LifecycleHookFailurePolicyIgnore means ignore failure and continue the deployment.
+ LifecycleHookFailurePolicyIgnore LifecycleHookFailurePolicy = "Ignore"
+)
+
+// ExecNewPodHook is a hook implementation which runs a command in a new pod
+// based on the specified container which is assumed to be part of the
+// deployment template.
+type ExecNewPodHook struct {
+ // Command is the action command and its arguments.
+ Command []string `json:"command" protobuf:"bytes,1,rep,name=command"`
+ // Env is a set of environment variables to supply to the hook pod's container.
+ Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,2,rep,name=env"`
+ // ContainerName is the name of a container in the deployment pod template
+ // whose container image will be used for the hook pod's container.
+ ContainerName string `json:"containerName" protobuf:"bytes,3,opt,name=containerName"`
+ // Volumes is a list of named volumes from the pod template which should be
+ // copied to the hook pod. Volume names not found in pod spec are ignored.
+ // An empty list means no volumes will be copied.
+ Volumes []string `json:"volumes,omitempty" protobuf:"bytes,4,rep,name=volumes"`
+}
+
+// TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag.
+type TagImageHook struct {
+ // ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single
+ // container this value will be defaulted to the name of that container.
+ ContainerName string `json:"containerName" protobuf:"bytes,1,opt,name=containerName"`
+ // To is the target ImageStreamTag to set the container's image onto.
+ To corev1.ObjectReference `json:"to" protobuf:"bytes,2,opt,name=to"`
+}
+
+// DeploymentTriggerPolicies is a list of policies where nil values are different from empty arrays.
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type DeploymentTriggerPolicies []DeploymentTriggerPolicy
+
+func (t DeploymentTriggerPolicies) String() string {
+ return fmt.Sprintf("%v", []DeploymentTriggerPolicy(t))
+}
+
+// DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment.
+type DeploymentTriggerPolicy struct {
+ // Type of the trigger
+ Type DeploymentTriggerType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentTriggerType"`
+ // ImageChangeParams represents the parameters for the ImageChange trigger.
+ ImageChangeParams *DeploymentTriggerImageChangeParams `json:"imageChangeParams,omitempty" protobuf:"bytes,2,opt,name=imageChangeParams"`
+}
+
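A hypothetical usage sketch, not part of the patch: building an ImageChange trigger policy that redeploys when a tag moves, restricted to one container. The image stream tag "myapp:latest" and container name "app" are invented; DeploymentTriggerImageChangeParams is the parameter type defined just below.

package main

import (
	"fmt"

	appsv1 "github.com/openshift/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Redeploy automatically whenever the watched image stream tag changes,
	// updating only the "app" container in the pod template.
	trigger := appsv1.DeploymentTriggerPolicy{
		Type: appsv1.DeploymentTriggerOnImageChange,
		ImageChangeParams: &appsv1.DeploymentTriggerImageChangeParams{
			Automatic:      true,
			ContainerNames: []string{"app"},
			From: corev1.ObjectReference{
				Kind: "ImageStreamTag",
				Name: "myapp:latest",
			},
		},
	}
	fmt.Printf("%+v\n", trigger)
}

+// DeploymentTriggerType refers to a specific DeploymentTriggerPolicy implementation.
+type DeploymentTriggerType string
+
+const (
+ // DeploymentTriggerOnImageChange will create new deployments in response to updated tags from
+ // a container image repository.
+ DeploymentTriggerOnImageChange DeploymentTriggerType = "ImageChange"
+ // DeploymentTriggerOnConfigChange will create new deployments in response to changes to
+ // the ControllerTemplate of a DeploymentConfig.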
+ DeploymentTriggerOnConfigChange DeploymentTriggerType = "ConfigChange" +) + +// DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger. +type DeploymentTriggerImageChangeParams struct { + // Automatic means that the detection of a new tag value should result in an image update + // inside the pod template. + Automatic bool `json:"automatic,omitempty" protobuf:"varint,1,opt,name=automatic"` + // ContainerNames is used to restrict tag updates to the specified set of container names in a pod. + // If multiple triggers point to the same containers, the resulting behavior is undefined. Future + // API versions will make this a validation error. If ContainerNames does not point to a valid container, + // the trigger will be ignored. Future API versions will make this a validation error. + ContainerNames []string `json:"containerNames,omitempty" protobuf:"bytes,2,rep,name=containerNames"` + // From is a reference to an image stream tag to watch for changes. From.Name is the only + // required subfield - if From.Namespace is blank, the namespace of the current deployment + // trigger will be used. + From corev1.ObjectReference `json:"from" protobuf:"bytes,3,opt,name=from"` + // LastTriggeredImage is the last image to be triggered. + LastTriggeredImage string `json:"lastTriggeredImage,omitempty" protobuf:"bytes,4,opt,name=lastTriggeredImage"` +} + +// DeploymentConfigStatus represents the current deployment state. +type DeploymentConfigStatus struct { + // LatestVersion is used to determine whether the current deployment associated with a deployment + // config is out of sync. + LatestVersion int64 `json:"latestVersion" protobuf:"varint,1,opt,name=latestVersion"` + // ObservedGeneration is the most recent generation observed by the deployment config controller. + ObservedGeneration int64 `json:"observedGeneration" protobuf:"varint,2,opt,name=observedGeneration"` + // Replicas is the total number of pods targeted by this deployment config. + Replicas int32 `json:"replicas" protobuf:"varint,3,opt,name=replicas"` + // UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config + // that have the desired template spec. + UpdatedReplicas int32 `json:"updatedReplicas" protobuf:"varint,4,opt,name=updatedReplicas"` + // AvailableReplicas is the total number of available pods targeted by this deployment config. + AvailableReplicas int32 `json:"availableReplicas" protobuf:"varint,5,opt,name=availableReplicas"` + // UnavailableReplicas is the total number of unavailable pods targeted by this deployment config. + UnavailableReplicas int32 `json:"unavailableReplicas" protobuf:"varint,6,opt,name=unavailableReplicas"` + // Details are the reasons for the update to this deployment config. + // This could be based on a change made by the user or caused by an automatic trigger + Details *DeploymentDetails `json:"details,omitempty" protobuf:"bytes,7,opt,name=details"` + // Conditions represents the latest available observations of a deployment config's current state. + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,8,rep,name=conditions"` + // Total number of ready pods targeted by this deployment. + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,9,opt,name=readyReplicas"` +} + +// DeploymentDetails captures information about the causes of a deployment. 
+type DeploymentDetails struct {
+ // Message is the user specified change message, if this deployment was triggered manually by the user
+ Message string `json:"message,omitempty" protobuf:"bytes,1,opt,name=message"`
+ // Causes are extended data associated with all the causes for creating a new deployment
+ Causes []DeploymentCause `json:"causes" protobuf:"bytes,2,rep,name=causes"`
+}
+
+// DeploymentCause captures information about a particular cause of a deployment.
+type DeploymentCause struct {
+ // Type of the trigger that resulted in the creation of a new deployment
+ Type DeploymentTriggerType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentTriggerType"`
+ // ImageTrigger contains the image trigger details, if this trigger was fired based on an image change
+ ImageTrigger *DeploymentCauseImageTrigger `json:"imageTrigger,omitempty" protobuf:"bytes,2,opt,name=imageTrigger"`
+}
+
+// DeploymentCauseImageTrigger represents details about the cause of a deployment originating
+// from an image change trigger
+type DeploymentCauseImageTrigger struct {
+ // From is a reference to the changed object which triggered a deployment. The field may have
+ // the kinds DockerImage, ImageStreamTag, or ImageStreamImage.
+ From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
+}
+
+type DeploymentConditionType string
+
+// These are valid conditions of a DeploymentConfig.
+const (
+ // DeploymentAvailable means the DeploymentConfig is available, i.e. at least the minimum available
+ // replicas required (dc.spec.replicas in case the DeploymentConfig is of Recreate type,
+ // dc.spec.replicas - dc.spec.strategy.rollingParams.maxUnavailable in case it's Rolling) are up and
+ // running for at least dc.spec.minReadySeconds.
+ DeploymentAvailable DeploymentConditionType = "Available"
+ // DeploymentProgressing is:
+ // * True: the DeploymentConfig has been successfully deployed or is in the process of being deployed.
+ //   The two different states can be determined by looking at the Reason of the Condition.
+ //   For example, a complete DC will have {Status: True, Reason: NewReplicationControllerAvailable}
+ //   and a DC in the middle of a rollout {Status: True, Reason: ReplicationControllerUpdated}.
+ //   TODO: Represent a successfully deployed DC by using something else for Status like Unknown?
+ // * False: the DeploymentConfig has failed to deploy its latest version.
+ //
+ // This condition is purely informational and depends on the dc.spec.strategy.*params.timeoutSeconds
+ // field, which is responsible for the time in seconds to wait for a rollout before deciding that
+ // no progress can be made, thus the rollout is aborted.
+ //
+ // Progress for a DeploymentConfig is considered when new pods scale up or old pods scale down.
+ DeploymentProgressing DeploymentConditionType = "Progressing"
+ // DeploymentReplicaFailure is added in a deployment config when one of its pods
+ // fails to be created or deleted.
+ DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure"
+)
+
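An illustrative helper, not part of the patch: how a consumer might look up the Available condition defined above in a DeploymentConfigStatus. The sample status value is hypothetical; DeploymentCondition is the struct defined immediately below.

package main

import (
	"fmt"

	appsv1 "github.com/openshift/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
)

// availableCondition returns the Available condition from the status, or nil
// if that condition has not been reported yet.
func availableCondition(status appsv1.DeploymentConfigStatus) *appsv1.DeploymentCondition {
	for i := range status.Conditions {
		if status.Conditions[i].Type == appsv1.DeploymentAvailable {
			return &status.Conditions[i]
		}
	}
	return nil
}

func main() {
	status := appsv1.DeploymentConfigStatus{
		Conditions: []appsv1.DeploymentCondition{
			{Type: appsv1.DeploymentAvailable, Status: corev1.ConditionTrue},
		},
	}
	if c := availableCondition(status); c != nil {
		fmt.Println("available:", c.Status)
	}
}

+// DeploymentCondition describes the state of a deployment config at a certain point.
+type DeploymentCondition struct {
+ // Type of deployment condition.
+ Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"`
+ // Status of the condition, one of True, False, Unknown.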
+ Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"` + // The last time this condition was updated. + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` + // The last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=3.0 +// +k8s:prerelease-lifecycle-gen:deprecated=4.14 +// +k8s:prerelease-lifecycle-gen:removed=4.10000 + +// DeploymentConfigList is a collection of deployment configs. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type DeploymentConfigList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of deployment configs + Items []DeploymentConfig `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=3.0 +// +k8s:prerelease-lifecycle-gen:deprecated=4.14 +// +k8s:prerelease-lifecycle-gen:removed=4.10000 + +// DeploymentConfigRollback provides the input to rollback generation. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type DeploymentConfigRollback struct { + metav1.TypeMeta `json:",inline"` + // Name of the deployment config that will be rolled back. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // UpdatedAnnotations is a set of new annotations that will be added in the deployment config. + UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty" protobuf:"bytes,2,rep,name=updatedAnnotations"` + // Spec defines the options to rollback generation. + Spec DeploymentConfigRollbackSpec `json:"spec" protobuf:"bytes,3,opt,name=spec"` +} + +// DeploymentConfigRollbackSpec represents the options for rollback generation. +type DeploymentConfigRollbackSpec struct { + // From points to a ReplicationController which is a deployment. + From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"` + // Revision to rollback to. If set to 0, rollback to the last revision. + Revision int64 `json:"revision,omitempty" protobuf:"varint,2,opt,name=revision"` + // IncludeTriggers specifies whether to include config Triggers. + IncludeTriggers bool `json:"includeTriggers" protobuf:"varint,3,opt,name=includeTriggers"` + // IncludeTemplate specifies whether to include the PodTemplateSpec. + IncludeTemplate bool `json:"includeTemplate" protobuf:"varint,4,opt,name=includeTemplate"` + // IncludeReplicationMeta specifies whether to include the replica count and selector. 
+ IncludeReplicationMeta bool `json:"includeReplicationMeta" protobuf:"varint,5,opt,name=includeReplicationMeta"`
+ // IncludeStrategy specifies whether to include the deployment Strategy.
+ IncludeStrategy bool `json:"includeStrategy" protobuf:"varint,6,opt,name=includeStrategy"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=3.0
+// +k8s:prerelease-lifecycle-gen:deprecated=4.14
+// +k8s:prerelease-lifecycle-gen:removed=4.10000
+
+// DeploymentRequest is a request to a deployment config for a new deployment.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type DeploymentRequest struct {
+ metav1.TypeMeta `json:",inline"`
+ // Name of the deployment config for requesting a new deployment.
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // Latest will update the deployment config with the latest state from all triggers.
+ Latest bool `json:"latest" protobuf:"varint,2,opt,name=latest"`
+ // Force will try to force a new deployment to run. If the deployment config is paused,
+ // then setting this to true will return an Invalid error.
+ Force bool `json:"force" protobuf:"varint,3,opt,name=force"`
+ // ExcludeTriggers instructs the instantiator to avoid processing the specified triggers.
+ // This field overrides the triggers from latest and allows clients to control specific
+ // logic. This field is ignored if not specified.
+ ExcludeTriggers []DeploymentTriggerType `json:"excludeTriggers,omitempty" protobuf:"bytes,4,rep,name=excludeTriggers,casttype=DeploymentTriggerType"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=3.0
+// +k8s:prerelease-lifecycle-gen:deprecated=4.14
+// +k8s:prerelease-lifecycle-gen:removed=4.10000
+
+// DeploymentLog represents the logs for a deployment
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type DeploymentLog struct {
+ metav1.TypeMeta `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=3.0
+// +k8s:prerelease-lifecycle-gen:deprecated=4.14
+// +k8s:prerelease-lifecycle-gen:removed=4.10000
+
+// DeploymentLogOptions is the REST options for a deployment log
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type DeploymentLogOptions struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // The container for which to stream logs. Defaults to only container if there is one container in the pod.
+ Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"`
+ // Follow if true indicates that the deployment log should be streamed until
+ // the deployment terminates.
+ Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"`
+ // Return previous deployment logs. Defaults to false.
+ Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"`
+ // A relative time in seconds before the current time from which to show logs. If this value
+ // precedes the time a pod was started, only logs since the pod start will be returned.
+ // If this value is in the future, no logs will be returned.
+ // Only one of sinceSeconds or sinceTime may be specified. + SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"` + // An RFC3339 timestamp from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + SinceTime *metav1.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"` + // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. Defaults to false. + Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"` + // If set, the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"` + // If set, the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. + LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"` + + // NoWait if true causes the call to return immediately even if the deployment + // is not available yet. Otherwise the server will wait until the deployment has started. + // TODO: Fix the tag to 'noWait' in v2 + NoWait bool `json:"nowait,omitempty" protobuf:"varint,9,opt,name=nowait"` + + // Version of the deployment for which to view logs. + Version *int64 `json:"version,omitempty" protobuf:"varint,10,opt,name=version"` +} diff --git a/vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..11c22a80f --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/zz_generated.deepcopy.go @@ -0,0 +1,682 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomDeploymentStrategyParams) DeepCopyInto(out *CustomDeploymentStrategyParams) { + *out = *in + if in.Environment != nil { + in, out := &in.Environment, &out.Environment + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomDeploymentStrategyParams. +func (in *CustomDeploymentStrategyParams) DeepCopy() *CustomDeploymentStrategyParams { + if in == nil { + return nil + } + out := new(CustomDeploymentStrategyParams) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentCause) DeepCopyInto(out *DeploymentCause) { + *out = *in + if in.ImageTrigger != nil { + in, out := &in.ImageTrigger, &out.ImageTrigger + *out = new(DeploymentCauseImageTrigger) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCause. +func (in *DeploymentCause) DeepCopy() *DeploymentCause { + if in == nil { + return nil + } + out := new(DeploymentCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentCauseImageTrigger) DeepCopyInto(out *DeploymentCauseImageTrigger) { + *out = *in + out.From = in.From + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCauseImageTrigger. +func (in *DeploymentCauseImageTrigger) DeepCopy() *DeploymentCauseImageTrigger { + if in == nil { + return nil + } + out := new(DeploymentCauseImageTrigger) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition. +func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { + if in == nil { + return nil + } + out := new(DeploymentCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentConfig) DeepCopyInto(out *DeploymentConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfig. +func (in *DeploymentConfig) DeepCopy() *DeploymentConfig { + if in == nil { + return nil + } + out := new(DeploymentConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentConfigList) DeepCopyInto(out *DeploymentConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DeploymentConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigList. +func (in *DeploymentConfigList) DeepCopy() *DeploymentConfigList { + if in == nil { + return nil + } + out := new(DeploymentConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *DeploymentConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentConfigRollback) DeepCopyInto(out *DeploymentConfigRollback) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.UpdatedAnnotations != nil { + in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.Spec = in.Spec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigRollback. +func (in *DeploymentConfigRollback) DeepCopy() *DeploymentConfigRollback { + if in == nil { + return nil + } + out := new(DeploymentConfigRollback) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentConfigRollback) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentConfigRollbackSpec) DeepCopyInto(out *DeploymentConfigRollbackSpec) { + *out = *in + out.From = in.From + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigRollbackSpec. +func (in *DeploymentConfigRollbackSpec) DeepCopy() *DeploymentConfigRollbackSpec { + if in == nil { + return nil + } + out := new(DeploymentConfigRollbackSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentConfigSpec) DeepCopyInto(out *DeploymentConfigSpec) { + *out = *in + in.Strategy.DeepCopyInto(&out.Strategy) + if in.Triggers != nil { + in, out := &in.Triggers, &out.Triggers + *out = make(DeploymentTriggerPolicies, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(corev1.PodTemplateSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigSpec. +func (in *DeploymentConfigSpec) DeepCopy() *DeploymentConfigSpec { + if in == nil { + return nil + } + out := new(DeploymentConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentConfigStatus) DeepCopyInto(out *DeploymentConfigStatus) { + *out = *in + if in.Details != nil { + in, out := &in.Details, &out.Details + *out = new(DeploymentDetails) + (*in).DeepCopyInto(*out) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentConfigStatus. +func (in *DeploymentConfigStatus) DeepCopy() *DeploymentConfigStatus { + if in == nil { + return nil + } + out := new(DeploymentConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentDetails) DeepCopyInto(out *DeploymentDetails) { + *out = *in + if in.Causes != nil { + in, out := &in.Causes, &out.Causes + *out = make([]DeploymentCause, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentDetails. +func (in *DeploymentDetails) DeepCopy() *DeploymentDetails { + if in == nil { + return nil + } + out := new(DeploymentDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentLog) DeepCopyInto(out *DeploymentLog) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentLog. +func (in *DeploymentLog) DeepCopy() *DeploymentLog { + if in == nil { + return nil + } + out := new(DeploymentLog) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentLog) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentLogOptions) DeepCopyInto(out *DeploymentLogOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.SinceSeconds != nil { + in, out := &in.SinceSeconds, &out.SinceSeconds + *out = new(int64) + **out = **in + } + if in.SinceTime != nil { + in, out := &in.SinceTime, &out.SinceTime + *out = (*in).DeepCopy() + } + if in.TailLines != nil { + in, out := &in.TailLines, &out.TailLines + *out = new(int64) + **out = **in + } + if in.LimitBytes != nil { + in, out := &in.LimitBytes, &out.LimitBytes + *out = new(int64) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentLogOptions. +func (in *DeploymentLogOptions) DeepCopy() *DeploymentLogOptions { + if in == nil { + return nil + } + out := new(DeploymentLogOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentLogOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentRequest) DeepCopyInto(out *DeploymentRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.ExcludeTriggers != nil { + in, out := &in.ExcludeTriggers, &out.ExcludeTriggers + *out = make([]DeploymentTriggerType, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentRequest. +func (in *DeploymentRequest) DeepCopy() *DeploymentRequest { + if in == nil { + return nil + } + out := new(DeploymentRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) { + *out = *in + if in.CustomParams != nil { + in, out := &in.CustomParams, &out.CustomParams + *out = new(CustomDeploymentStrategyParams) + (*in).DeepCopyInto(*out) + } + if in.RecreateParams != nil { + in, out := &in.RecreateParams, &out.RecreateParams + *out = new(RecreateDeploymentStrategyParams) + (*in).DeepCopyInto(*out) + } + if in.RollingParams != nil { + in, out := &in.RollingParams, &out.RollingParams + *out = new(RollingDeploymentStrategyParams) + (*in).DeepCopyInto(*out) + } + in.Resources.DeepCopyInto(&out.Resources) + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ActiveDeadlineSeconds != nil { + in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy. +func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { + if in == nil { + return nil + } + out := new(DeploymentStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentTriggerImageChangeParams) DeepCopyInto(out *DeploymentTriggerImageChangeParams) { + *out = *in + if in.ContainerNames != nil { + in, out := &in.ContainerNames, &out.ContainerNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.From = in.From + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentTriggerImageChangeParams. +func (in *DeploymentTriggerImageChangeParams) DeepCopy() *DeploymentTriggerImageChangeParams { + if in == nil { + return nil + } + out := new(DeploymentTriggerImageChangeParams) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in DeploymentTriggerPolicies) DeepCopyInto(out *DeploymentTriggerPolicies) { + { + in := &in + *out = make(DeploymentTriggerPolicies, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentTriggerPolicies. 
+func (in DeploymentTriggerPolicies) DeepCopy() DeploymentTriggerPolicies { + if in == nil { + return nil + } + out := new(DeploymentTriggerPolicies) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentTriggerPolicy) DeepCopyInto(out *DeploymentTriggerPolicy) { + *out = *in + if in.ImageChangeParams != nil { + in, out := &in.ImageChangeParams, &out.ImageChangeParams + *out = new(DeploymentTriggerImageChangeParams) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentTriggerPolicy. +func (in *DeploymentTriggerPolicy) DeepCopy() *DeploymentTriggerPolicy { + if in == nil { + return nil + } + out := new(DeploymentTriggerPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecNewPodHook) DeepCopyInto(out *ExecNewPodHook) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecNewPodHook. +func (in *ExecNewPodHook) DeepCopy() *ExecNewPodHook { + if in == nil { + return nil + } + out := new(ExecNewPodHook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LifecycleHook) DeepCopyInto(out *LifecycleHook) { + *out = *in + if in.ExecNewPod != nil { + in, out := &in.ExecNewPod, &out.ExecNewPod + *out = new(ExecNewPodHook) + (*in).DeepCopyInto(*out) + } + if in.TagImages != nil { + in, out := &in.TagImages, &out.TagImages + *out = make([]TagImageHook, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleHook. +func (in *LifecycleHook) DeepCopy() *LifecycleHook { + if in == nil { + return nil + } + out := new(LifecycleHook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RecreateDeploymentStrategyParams) DeepCopyInto(out *RecreateDeploymentStrategyParams) { + *out = *in + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int64) + **out = **in + } + if in.Pre != nil { + in, out := &in.Pre, &out.Pre + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) + } + if in.Mid != nil { + in, out := &in.Mid, &out.Mid + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) + } + if in.Post != nil { + in, out := &in.Post, &out.Post + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RecreateDeploymentStrategyParams. 
+func (in *RecreateDeploymentStrategyParams) DeepCopy() *RecreateDeploymentStrategyParams { + if in == nil { + return nil + } + out := new(RecreateDeploymentStrategyParams) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingDeploymentStrategyParams) DeepCopyInto(out *RollingDeploymentStrategyParams) { + *out = *in + if in.UpdatePeriodSeconds != nil { + in, out := &in.UpdatePeriodSeconds, &out.UpdatePeriodSeconds + *out = new(int64) + **out = **in + } + if in.IntervalSeconds != nil { + in, out := &in.IntervalSeconds, &out.IntervalSeconds + *out = new(int64) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int64) + **out = **in + } + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(intstr.IntOrString) + **out = **in + } + if in.Pre != nil { + in, out := &in.Pre, &out.Pre + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) + } + if in.Post != nil { + in, out := &in.Post, &out.Post + *out = new(LifecycleHook) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingDeploymentStrategyParams. +func (in *RollingDeploymentStrategyParams) DeepCopy() *RollingDeploymentStrategyParams { + if in == nil { + return nil + } + out := new(RollingDeploymentStrategyParams) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagImageHook) DeepCopyInto(out *TagImageHook) { + *out = *in + out.To = in.To + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagImageHook. +func (in *TagImageHook) DeepCopy() *TagImageHook { + if in == nil { + return nil + } + out := new(TagImageHook) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..ab137d59b --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,284 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_CustomDeploymentStrategyParams = map[string]string{
+ "": "CustomDeploymentStrategyParams are the input to the Custom deployment strategy.",
+ "image": "Image specifies a container image which can carry out a deployment.",
+ "environment": "Environment holds the environment which will be given to the container for Image.",
+ "command": "Command is optional and overrides CMD in the container Image.",
+}
+
+func (CustomDeploymentStrategyParams) SwaggerDoc() map[string]string {
+ return map_CustomDeploymentStrategyParams
+}
+
+var map_DeploymentCause = map[string]string{
+ "": "DeploymentCause captures information about a particular cause of a deployment.",
+ "type": "Type of the trigger that resulted in the creation of a new deployment",
+ "imageTrigger": "ImageTrigger contains the image trigger details, if this trigger was fired based on an image change",
+}
+
+func (DeploymentCause) SwaggerDoc() map[string]string {
+ return map_DeploymentCause
+}
+
+var map_DeploymentCauseImageTrigger = map[string]string{
+ "": "DeploymentCauseImageTrigger represents details about the cause of a deployment originating from an image change trigger",
+ "from": "From is a reference to the changed object which triggered a deployment. The field may have the kinds DockerImage, ImageStreamTag, or ImageStreamImage.",
+}
+
+func (DeploymentCauseImageTrigger) SwaggerDoc() map[string]string {
+ return map_DeploymentCauseImageTrigger
+}
+
+var map_DeploymentCondition = map[string]string{
+ "": "DeploymentCondition describes the state of a deployment config at a certain point.",
+ "type": "Type of deployment condition.",
+ "status": "Status of the condition, one of True, False, Unknown.",
+ "lastUpdateTime": "The last time this condition was updated.",
+ "lastTransitionTime": "The last time the condition transitioned from one status to another.",
+ "reason": "The reason for the condition's last transition.",
+ "message": "A human readable message indicating details about the transition.",
+}
+
+func (DeploymentCondition) SwaggerDoc() map[string]string {
+ return map_DeploymentCondition
+}
+
+var map_DeploymentConfig = map[string]string{
+ "": "Deployment Configs define the template for a pod and manage deploying new images or configuration changes. A single deployment configuration is usually analogous to a single micro-service. Can support many different deployment patterns, including full restart, customizable rolling updates, and fully custom behaviors, as well as pre- and post-deployment hooks. Each individual deployment is represented as a replication controller.\n\nA deployment is \"triggered\" when its configuration is changed or a tag in an Image Stream is changed. Triggers can be disabled to allow manual control over a deployment. The \"strategy\" determines how the deployment is carried out and may be changed at any time. The `latestVersion` field is updated when a new deployment is triggered by any means.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). Deprecated: Use deployments or other means for declarative updates for pods instead.",
+ "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec represents a desired deployment state and how to deploy to it.", + "status": "Status represents the current deployment state.", +} + +func (DeploymentConfig) SwaggerDoc() map[string]string { + return map_DeploymentConfig +} + +var map_DeploymentConfigList = map[string]string{ + "": "DeploymentConfigList is a collection of deployment configs.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is a list of deployment configs", +} + +func (DeploymentConfigList) SwaggerDoc() map[string]string { + return map_DeploymentConfigList +} + +var map_DeploymentConfigRollback = map[string]string{ + "": "DeploymentConfigRollback provides the input to rollback generation.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "name": "Name of the deployment config that will be rolled back.", + "updatedAnnotations": "UpdatedAnnotations is a set of new annotations that will be added in the deployment config.", + "spec": "Spec defines the options to rollback generation.", +} + +func (DeploymentConfigRollback) SwaggerDoc() map[string]string { + return map_DeploymentConfigRollback +} + +var map_DeploymentConfigRollbackSpec = map[string]string{ + "": "DeploymentConfigRollbackSpec represents the options for rollback generation.", + "from": "From points to a ReplicationController which is a deployment.", + "revision": "Revision to rollback to. If set to 0, rollback to the last revision.", + "includeTriggers": "IncludeTriggers specifies whether to include config Triggers.", + "includeTemplate": "IncludeTemplate specifies whether to include the PodTemplateSpec.", + "includeReplicationMeta": "IncludeReplicationMeta specifies whether to include the replica count and selector.", + "includeStrategy": "IncludeStrategy specifies whether to include the deployment Strategy.", +} + +func (DeploymentConfigRollbackSpec) SwaggerDoc() map[string]string { + return map_DeploymentConfigRollbackSpec +} + +var map_DeploymentConfigSpec = map[string]string{ + "": "DeploymentConfigSpec represents the desired state of the deployment.", + "strategy": "Strategy describes how a deployment is executed.", + "minReadySeconds": "MinReadySeconds is the minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "triggers": "Triggers determine how updates to a DeploymentConfig result in new deployments. If no triggers are defined, a new deployment can only occur as a result of an explicit client update to the DeploymentConfig with a new LatestVersion. If null, defaults to having a config change trigger.", + "replicas": "Replicas is the number of desired replicas.", + "revisionHistoryLimit": "RevisionHistoryLimit is the number of old ReplicationControllers to retain to allow for rollbacks. This field is a pointer to allow for differentiation between an explicit zero and not specified. Defaults to 10. 
(This only applies to DeploymentConfigs created via the new group API resource, not the legacy resource.)", + "test": "Test ensures that this deployment config will have zero replicas except while a deployment is running. This allows the deployment config to be used as a continuous deployment test - triggering on images, running the deployment, and then succeeding or failing. Post strategy hooks and After actions can be used to integrate successful deployment with an action.", + "paused": "Paused indicates that the deployment config is paused resulting in no new deployments on template changes or changes in the template caused by other triggers.", + "selector": "Selector is a label query over pods that should match the Replicas count.", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected.", +} + +func (DeploymentConfigSpec) SwaggerDoc() map[string]string { + return map_DeploymentConfigSpec +} + +var map_DeploymentConfigStatus = map[string]string{ + "": "DeploymentConfigStatus represents the current deployment state.", + "latestVersion": "LatestVersion is used to determine whether the current deployment associated with a deployment config is out of sync.", + "observedGeneration": "ObservedGeneration is the most recent generation observed by the deployment config controller.", + "replicas": "Replicas is the total number of pods targeted by this deployment config.", + "updatedReplicas": "UpdatedReplicas is the total number of non-terminated pods targeted by this deployment config that have the desired template spec.", + "availableReplicas": "AvailableReplicas is the total number of available pods targeted by this deployment config.", + "unavailableReplicas": "UnavailableReplicas is the total number of unavailable pods targeted by this deployment config.", + "details": "Details are the reasons for the update to this deployment config. This could be based on a change made by the user or caused by an automatic trigger", + "conditions": "Conditions represents the latest available observations of a deployment config's current state.", + "readyReplicas": "Total number of ready pods targeted by this deployment.", +} + +func (DeploymentConfigStatus) SwaggerDoc() map[string]string { + return map_DeploymentConfigStatus +} + +var map_DeploymentDetails = map[string]string{ + "": "DeploymentDetails captures information about the causes of a deployment.", + "message": "Message is the user specified change message, if this deployment was triggered manually by the user", + "causes": "Causes are extended data associated with all the causes for creating a new deployment", +} + +func (DeploymentDetails) SwaggerDoc() map[string]string { + return map_DeploymentDetails +} + +var map_DeploymentLog = map[string]string{ + "": "DeploymentLog represents the logs for a deployment\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (DeploymentLog) SwaggerDoc() map[string]string { + return map_DeploymentLog +} + +var map_DeploymentLogOptions = map[string]string{ + "": "DeploymentLogOptions is the REST options for a deployment log\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "container": "The container for which to stream logs. 
Defaults to only container if there is one container in the pod.",
+ "follow": "Follow if true indicates that the deployment log should be streamed until the deployment terminates.",
+ "previous": "Return previous deployment logs. Defaults to false.",
+ "sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
+ "sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.",
+ "timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.",
+ "tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime",
+ "limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.",
+ "nowait": "NoWait if true causes the call to return immediately even if the deployment is not available yet. Otherwise the server will wait until the deployment has started.",
+ "version": "Version of the deployment for which to view logs.",
+}
+
+func (DeploymentLogOptions) SwaggerDoc() map[string]string {
+ return map_DeploymentLogOptions
+}
+
+var map_DeploymentRequest = map[string]string{
+ "": "DeploymentRequest is a request to a deployment config for a new deployment.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "name": "Name of the deployment config for requesting a new deployment.",
+ "latest": "Latest will update the deployment config with the latest state from all triggers.",
+ "force": "Force will try to force a new deployment to run. If the deployment config is paused, then setting this to true will return an Invalid error.",
+ "excludeTriggers": "ExcludeTriggers instructs the instantiator to avoid processing the specified triggers. This field overrides the triggers from latest and allows clients to control specific logic. 
This field is ignored if not specified.",
+}
+
+func (DeploymentRequest) SwaggerDoc() map[string]string {
+ return map_DeploymentRequest
+}
+
+var map_DeploymentStrategy = map[string]string{
+ "": "DeploymentStrategy describes how to perform a deployment.",
+ "type": "Type is the name of a deployment strategy.",
+ "customParams": "CustomParams are the input to the Custom deployment strategy, and may also be specified for the Recreate and Rolling strategies to customize the execution process that runs the deployment.",
+ "recreateParams": "RecreateParams are the input to the Recreate deployment strategy.",
+ "rollingParams": "RollingParams are the input to the Rolling deployment strategy.",
+ "resources": "Resources contains resource requirements to execute the deployment and any hooks.",
+ "labels": "Labels is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.",
+ "annotations": "Annotations is a set of key, value pairs added to custom deployer and lifecycle pre/post hook pods.",
+ "activeDeadlineSeconds": "ActiveDeadlineSeconds is the duration in seconds that the deployer pods for this deployment config may be active on a node before the system actively tries to terminate them.",
+}
+
+func (DeploymentStrategy) SwaggerDoc() map[string]string {
+ return map_DeploymentStrategy
+}
+
+var map_DeploymentTriggerImageChangeParams = map[string]string{
+ "": "DeploymentTriggerImageChangeParams represents the parameters to the ImageChange trigger.",
+ "automatic": "Automatic means that the detection of a new tag value should result in an image update inside the pod template.",
+ "containerNames": "ContainerNames is used to restrict tag updates to the specified set of container names in a pod. If multiple triggers point to the same containers, the resulting behavior is undefined. Future API versions will make this a validation error. If ContainerNames does not point to a valid container, the trigger will be ignored. Future API versions will make this a validation error.",
+ "from": "From is a reference to an image stream tag to watch for changes. From.Name is the only required subfield - if From.Namespace is blank, the namespace of the current deployment trigger will be used.",
+ "lastTriggeredImage": "LastTriggeredImage is the last image to be triggered.",
+}
+
+func (DeploymentTriggerImageChangeParams) SwaggerDoc() map[string]string {
+ return map_DeploymentTriggerImageChangeParams
+}
+
+var map_DeploymentTriggerPolicy = map[string]string{
+ "": "DeploymentTriggerPolicy describes a policy for a single trigger that results in a new deployment.",
+ "type": "Type of the trigger",
+ "imageChangeParams": "ImageChangeParams represents the parameters for the ImageChange trigger.",
+}
+
+func (DeploymentTriggerPolicy) SwaggerDoc() map[string]string {
+ return map_DeploymentTriggerPolicy
+}
+
+var map_ExecNewPodHook = map[string]string{
+ "": "ExecNewPodHook is a hook implementation which runs a command in a new pod based on the specified container which is assumed to be part of the deployment template.",
+ "command": "Command is the action command and its arguments.",
+ "env": "Env is a set of environment variables to supply to the hook pod's container.",
+ "containerName": "ContainerName is the name of a container in the deployment pod template whose container image will be used for the hook pod's container.",
+ "volumes": "Volumes is a list of named volumes from the pod template which should be copied to the hook pod. Volume names not found in pod spec are ignored. 
An empty list means no volumes will be copied.", +} + +func (ExecNewPodHook) SwaggerDoc() map[string]string { + return map_ExecNewPodHook +} + +var map_LifecycleHook = map[string]string{ + "": "LifecycleHook defines a specific deployment lifecycle action. Only one type of action may be specified at any time.", + "failurePolicy": "FailurePolicy specifies what action to take if the hook fails.", + "execNewPod": "ExecNewPod specifies the options for a lifecycle hook backed by a pod.", + "tagImages": "TagImages instructs the deployer to tag the current image referenced under a container onto an image stream tag.", +} + +func (LifecycleHook) SwaggerDoc() map[string]string { + return map_LifecycleHook +} + +var map_RecreateDeploymentStrategyParams = map[string]string{ + "": "RecreateDeploymentStrategyParams are the input to the Recreate deployment strategy.", + "timeoutSeconds": "TimeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.", + "pre": "Pre is a lifecycle hook which is executed before the strategy manipulates the deployment. All LifecycleHookFailurePolicy values are supported.", + "mid": "Mid is a lifecycle hook which is executed while the deployment is scaled down to zero before the first new pod is created. All LifecycleHookFailurePolicy values are supported.", + "post": "Post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.", +} + +func (RecreateDeploymentStrategyParams) SwaggerDoc() map[string]string { + return map_RecreateDeploymentStrategyParams +} + +var map_RollingDeploymentStrategyParams = map[string]string{ + "": "RollingDeploymentStrategyParams are the input to the Rolling deployment strategy.", + "updatePeriodSeconds": "UpdatePeriodSeconds is the time to wait between individual pod updates. If the value is nil, a default will be used.", + "intervalSeconds": "IntervalSeconds is the time to wait between polling deployment status after update. If the value is nil, a default will be used.", + "timeoutSeconds": "TimeoutSeconds is the time to wait for updates before giving up. If the value is nil, a default will be used.", + "maxUnavailable": "MaxUnavailable is the maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). Absolute number is calculated from percentage by rounding down.\n\nThis cannot be 0 if MaxSurge is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the old RC can be scaled down by 30% immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that at least 70% of original number of pods are available at all times during the update.", + "maxSurge": "MaxSurge is the maximum number of pods that can be scheduled above the original number of pods. Value can be an absolute number (ex: 5) or a percentage of total pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up.\n\nThis cannot be 0 if MaxUnavailable is 0. By default, 25% is used.\n\nExample: when this is set to 30%, the new RC can be scaled up by 30% immediately when the rolling update starts. 
Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of original pods.", + "pre": "Pre is a lifecycle hook which is executed before the deployment process begins. All LifecycleHookFailurePolicy values are supported.", + "post": "Post is a lifecycle hook which is executed after the strategy has finished all deployment logic. All LifecycleHookFailurePolicy values are supported.", +} + +func (RollingDeploymentStrategyParams) SwaggerDoc() map[string]string { + return map_RollingDeploymentStrategyParams +} + +var map_TagImageHook = map[string]string{ + "": "TagImageHook is a request to tag the image in a particular container onto an ImageStreamTag.", + "containerName": "ContainerName is the name of a container in the deployment config whose image value will be used as the source of the tag. If there is only a single container this value will be defaulted to the name of that container.", + "to": "To is the target ImageStreamTag to set the container's image onto.", +} + +func (TagImageHook) SwaggerDoc() map[string]string { + return map_TagImageHook +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/apps/v1/zz_prerelease_lifecycle_generated.go b/vendor/github.com/openshift/api/apps/v1/zz_prerelease_lifecycle_generated.go new file mode 100644 index 000000000..b3e4de501 --- /dev/null +++ b/vendor/github.com/openshift/api/apps/v1/zz_prerelease_lifecycle_generated.go @@ -0,0 +1,114 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DeploymentConfig) APILifecycleIntroduced() (major, minor int) { + return 3, 0 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *DeploymentConfig) APILifecycleDeprecated() (major, minor int) { + return 4, 14 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *DeploymentConfig) APILifecycleRemoved() (major, minor int) { + return 4, 10000 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DeploymentConfigList) APILifecycleIntroduced() (major, minor int) { + return 3, 0 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. 
+func (in *DeploymentConfigList) APILifecycleDeprecated() (major, minor int) { + return 4, 14 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *DeploymentConfigList) APILifecycleRemoved() (major, minor int) { + return 4, 10000 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DeploymentConfigRollback) APILifecycleIntroduced() (major, minor int) { + return 3, 0 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *DeploymentConfigRollback) APILifecycleDeprecated() (major, minor int) { + return 4, 14 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *DeploymentConfigRollback) APILifecycleRemoved() (major, minor int) { + return 4, 10000 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DeploymentLog) APILifecycleIntroduced() (major, minor int) { + return 3, 0 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *DeploymentLog) APILifecycleDeprecated() (major, minor int) { + return 4, 14 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *DeploymentLog) APILifecycleRemoved() (major, minor int) { + return 4, 10000 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DeploymentLogOptions) APILifecycleIntroduced() (major, minor int) { + return 3, 0 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. 
+func (in *DeploymentLogOptions) APILifecycleDeprecated() (major, minor int) { + return 4, 14 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *DeploymentLogOptions) APILifecycleRemoved() (major, minor int) { + return 4, 10000 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *DeploymentRequest) APILifecycleIntroduced() (major, minor int) { + return 3, 0 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *DeploymentRequest) APILifecycleDeprecated() (major, minor int) { + return 4, 14 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *DeploymentRequest) APILifecycleRemoved() (major, minor int) { + return 4, 10000 +} diff --git a/vendor/github.com/openshift/api/authorization/install.go b/vendor/github.com/openshift/api/authorization/install.go new file mode 100644 index 000000000..08ecc95f4 --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/install.go @@ -0,0 +1,26 @@ +package authorization + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + authorizationv1 "github.com/openshift/api/authorization/v1" +) + +const ( + GroupName = "authorization.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(authorizationv1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/authorization/v1/0000_03_authorization-openshift_01_rolebindingrestriction.crd.yaml b/vendor/github.com/openshift/api/authorization/v1/0000_03_authorization-openshift_01_rolebindingrestriction.crd.yaml new file mode 100644 index 000000000..c968a1c7e --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/0000_03_authorization-openshift_01_rolebindingrestriction.crd.yaml @@ -0,0 +1,158 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: rolebindingrestrictions.authorization.openshift.io +spec: + group: authorization.openshift.io + names: + kind: 
RoleBindingRestriction + listKind: RoleBindingRestrictionList + plural: rolebindingrestrictions + singular: rolebindingrestriction + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "RoleBindingRestriction is an object that can be matched against a subject (user, group, or service account) to determine whether rolebindings on that subject are allowed in the namespace to which the RoleBindingRestriction belongs. If any one of those RoleBindingRestriction objects matches a subject, rolebindings on that subject in the namespace are allowed. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the matcher. + type: object + properties: + grouprestriction: + description: GroupRestriction matches against group subjects. + type: object + properties: + groups: + description: Groups is a list of groups used to match against an individual user's groups. If the user is a member of one of the whitelisted groups, the user is allowed to be bound to a role. + type: array + items: + type: string + nullable: true + labels: + description: Selectors specifies a list of label selectors over group labels. + type: array + items: + description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + nullable: true + nullable: true + serviceaccountrestriction: + description: ServiceAccountRestriction matches against service-account subjects. + type: object + properties: + namespaces: + description: Namespaces specifies a list of literal namespace names. + type: array + items: + type: string + serviceaccounts: + description: ServiceAccounts specifies a list of literal service-account names. + type: array + items: + description: ServiceAccountReference specifies a service account and namespace by their names. + type: object + properties: + name: + description: Name is the name of the service account. + type: string + namespace: + description: Namespace is the namespace of the service account. Service accounts from inside the whitelisted namespaces are allowed to be bound to roles. If Namespace is empty, then the namespace of the RoleBindingRestriction in which the ServiceAccountReference is embedded is used. + type: string + nullable: true + userrestriction: + description: UserRestriction matches against user subjects. + type: object + properties: + groups: + description: Groups specifies a list of literal group names. + type: array + items: + type: string + nullable: true + labels: + description: Selectors specifies a list of label selectors over user labels. + type: array + items: + description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + nullable: true + users: + description: Users specifies a list of literal user names. 
+ type: array + items: + type: string + nullable: true + served: true + storage: true diff --git a/vendor/github.com/openshift/api/authorization/v1/Makefile b/vendor/github.com/openshift/api/authorization/v1/Makefile new file mode 100644 index 000000000..1e47c9fd9 --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="authorization.openshift.io/v1" diff --git a/vendor/github.com/openshift/api/authorization/v1/codec.go b/vendor/github.com/openshift/api/authorization/v1/codec.go new file mode 100644 index 000000000..61f1f9f51 --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/codec.go @@ -0,0 +1,139 @@ +package v1 + +import ( + "github.com/openshift/api/pkg/serialization" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +var _ runtime.NestedObjectDecoder = &PolicyRule{} +var _ runtime.NestedObjectEncoder = &PolicyRule{} + +func (c *PolicyRule) DecodeNestedObjects(d runtime.Decoder) error { + // decoding failures result in a runtime.Unknown object being created in Object and passed + // to conversion + serialization.DecodeNestedRawExtensionOrUnknown(d, &c.AttributeRestrictions) + return nil +} +func (c *PolicyRule) EncodeNestedObjects(e runtime.Encoder) error { + return serialization.EncodeNestedRawExtension(e, &c.AttributeRestrictions) +} + +var _ runtime.NestedObjectDecoder = &SelfSubjectRulesReview{} +var _ runtime.NestedObjectEncoder = &SelfSubjectRulesReview{} + +func (c *SelfSubjectRulesReview) DecodeNestedObjects(d runtime.Decoder) error { + // decoding failures result in a runtime.Unknown object being created in Object and passed + // to conversion + for i := range c.Status.Rules { + c.Status.Rules[i].DecodeNestedObjects(d) + } + return nil +} +func (c *SelfSubjectRulesReview) EncodeNestedObjects(e runtime.Encoder) error { + for i := range c.Status.Rules { + if err := c.Status.Rules[i].EncodeNestedObjects(e); err != nil { + return err + } + } + return nil +} + +var _ runtime.NestedObjectDecoder = &SubjectRulesReview{} +var _ runtime.NestedObjectEncoder = &SubjectRulesReview{} + +func (c *SubjectRulesReview) DecodeNestedObjects(d runtime.Decoder) error { + // decoding failures result in a runtime.Unknown object being created in Object and passed + // to conversion + for i := range c.Status.Rules { + c.Status.Rules[i].DecodeNestedObjects(d) + } + return nil +} +func (c *SubjectRulesReview) EncodeNestedObjects(e runtime.Encoder) error { + for i := range c.Status.Rules { + if err := c.Status.Rules[i].EncodeNestedObjects(e); err != nil { + return err + } + } + return nil +} + +var _ runtime.NestedObjectDecoder = &ClusterRole{} +var _ runtime.NestedObjectEncoder = &ClusterRole{} + +func (c *ClusterRole) DecodeNestedObjects(d runtime.Decoder) error { + // decoding failures result in a runtime.Unknown object being created in Object and passed + // to conversion + for i := range c.Rules { + c.Rules[i].DecodeNestedObjects(d) + } + return nil +} +func (c *ClusterRole) EncodeNestedObjects(e runtime.Encoder) error { + for i := range c.Rules { + if err := c.Rules[i].EncodeNestedObjects(e); err != nil { + return err + } + } + return nil +} + +var _ runtime.NestedObjectDecoder = &Role{} +var _ runtime.NestedObjectEncoder = &Role{} + +func (c *Role) DecodeNestedObjects(d runtime.Decoder) error { + // decoding failures result in a runtime.Unknown object being created in Object and passed + // to conversion + for i := range c.Rules { + c.Rules[i].DecodeNestedObjects(d) + } 
+ return nil +} +func (c *Role) EncodeNestedObjects(e runtime.Encoder) error { + for i := range c.Rules { + if err := c.Rules[i].EncodeNestedObjects(e); err != nil { + return err + } + } + return nil +} + +var _ runtime.NestedObjectDecoder = &ClusterRoleList{} +var _ runtime.NestedObjectEncoder = &ClusterRoleList{} + +func (c *ClusterRoleList) DecodeNestedObjects(d runtime.Decoder) error { + // decoding failures result in a runtime.Unknown object being created in Object and passed + // to conversion + for i := range c.Items { + c.Items[i].DecodeNestedObjects(d) + } + return nil +} +func (c *ClusterRoleList) EncodeNestedObjects(e runtime.Encoder) error { + for i := range c.Items { + if err := c.Items[i].EncodeNestedObjects(e); err != nil { + return err + } + } + return nil +} + +var _ runtime.NestedObjectDecoder = &RoleList{} +var _ runtime.NestedObjectEncoder = &RoleList{} + +func (c *RoleList) DecodeNestedObjects(d runtime.Decoder) error { + // decoding failures result in a runtime.Unknown object being created in Object and passed + // to conversion + for i := range c.Items { + c.Items[i].DecodeNestedObjects(d) + } + return nil +} +func (c *RoleList) EncodeNestedObjects(e runtime.Encoder) error { + for i := range c.Items { + if err := c.Items[i].EncodeNestedObjects(e); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/openshift/api/authorization/v1/doc.go b/vendor/github.com/openshift/api/authorization/v1/doc.go new file mode 100644 index 000000000..a66741dce --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/doc.go @@ -0,0 +1,9 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/authorization/apis/authorization +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +kubebuilder:validation:Optional +// +groupName=authorization.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/authorization/v1/generated.pb.go b/vendor/github.com/openshift/api/authorization/v1/generated.pb.go new file mode 100644 index 000000000..4a38ab6f7 --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/generated.pb.go @@ -0,0 +1,8812 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/authorization/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v12 "k8s.io/api/core/v1" + v11 "k8s.io/api/rbac/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *Action) Reset() { *m = Action{} } +func (*Action) ProtoMessage() {} +func (*Action) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{0} +} +func (m *Action) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Action) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Action) XXX_Merge(src proto.Message) { + xxx_messageInfo_Action.Merge(m, src) +} +func (m *Action) XXX_Size() int { + return m.Size() +} +func (m *Action) XXX_DiscardUnknown() { + xxx_messageInfo_Action.DiscardUnknown(m) +} + +var xxx_messageInfo_Action proto.InternalMessageInfo + +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (*ClusterRole) ProtoMessage() {} +func (*ClusterRole) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{1} +} +func (m *ClusterRole) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRole) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRole.Merge(m, src) +} +func (m *ClusterRole) XXX_Size() int { + return m.Size() +} +func (m *ClusterRole) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRole.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRole proto.InternalMessageInfo + +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (*ClusterRoleBinding) ProtoMessage() {} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{2} +} +func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBinding.Merge(m, src) +} +func (m *ClusterRoleBinding) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo + +func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } +func (*ClusterRoleBindingList) ProtoMessage() {} +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{3} +} +func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBindingList.Merge(m, src) +} +func (m *ClusterRoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo + +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (*ClusterRoleList) 
ProtoMessage() {} +func (*ClusterRoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{4} +} +func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleList.Merge(m, src) +} +func (m *ClusterRoleList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo + +func (m *GroupRestriction) Reset() { *m = GroupRestriction{} } +func (*GroupRestriction) ProtoMessage() {} +func (*GroupRestriction) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{5} +} +func (m *GroupRestriction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupRestriction) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupRestriction.Merge(m, src) +} +func (m *GroupRestriction) XXX_Size() int { + return m.Size() +} +func (m *GroupRestriction) XXX_DiscardUnknown() { + xxx_messageInfo_GroupRestriction.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupRestriction proto.InternalMessageInfo + +func (m *IsPersonalSubjectAccessReview) Reset() { *m = IsPersonalSubjectAccessReview{} } +func (*IsPersonalSubjectAccessReview) ProtoMessage() {} +func (*IsPersonalSubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{6} +} +func (m *IsPersonalSubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IsPersonalSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IsPersonalSubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_IsPersonalSubjectAccessReview.Merge(m, src) +} +func (m *IsPersonalSubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *IsPersonalSubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_IsPersonalSubjectAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_IsPersonalSubjectAccessReview proto.InternalMessageInfo + +func (m *LocalResourceAccessReview) Reset() { *m = LocalResourceAccessReview{} } +func (*LocalResourceAccessReview) ProtoMessage() {} +func (*LocalResourceAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{7} +} +func (m *LocalResourceAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LocalResourceAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LocalResourceAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalResourceAccessReview.Merge(m, src) +} +func (m *LocalResourceAccessReview) XXX_Size() int { + return m.Size() +} +func (m *LocalResourceAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_LocalResourceAccessReview.DiscardUnknown(m) +} + +var 
xxx_messageInfo_LocalResourceAccessReview proto.InternalMessageInfo + +func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } +func (*LocalSubjectAccessReview) ProtoMessage() {} +func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{8} +} +func (m *LocalSubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LocalSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LocalSubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalSubjectAccessReview.Merge(m, src) +} +func (m *LocalSubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *LocalSubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_LocalSubjectAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_LocalSubjectAccessReview proto.InternalMessageInfo + +func (m *NamedClusterRole) Reset() { *m = NamedClusterRole{} } +func (*NamedClusterRole) ProtoMessage() {} +func (*NamedClusterRole) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{9} +} +func (m *NamedClusterRole) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamedClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamedClusterRole) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedClusterRole.Merge(m, src) +} +func (m *NamedClusterRole) XXX_Size() int { + return m.Size() +} +func (m *NamedClusterRole) XXX_DiscardUnknown() { + xxx_messageInfo_NamedClusterRole.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedClusterRole proto.InternalMessageInfo + +func (m *NamedClusterRoleBinding) Reset() { *m = NamedClusterRoleBinding{} } +func (*NamedClusterRoleBinding) ProtoMessage() {} +func (*NamedClusterRoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{10} +} +func (m *NamedClusterRoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamedClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamedClusterRoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedClusterRoleBinding.Merge(m, src) +} +func (m *NamedClusterRoleBinding) XXX_Size() int { + return m.Size() +} +func (m *NamedClusterRoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_NamedClusterRoleBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedClusterRoleBinding proto.InternalMessageInfo + +func (m *NamedRole) Reset() { *m = NamedRole{} } +func (*NamedRole) ProtoMessage() {} +func (*NamedRole) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{11} +} +func (m *NamedRole) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamedRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamedRole) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedRole.Merge(m, src) +} +func (m *NamedRole) XXX_Size() int { + return m.Size() +} +func (m *NamedRole) XXX_DiscardUnknown() { + 
xxx_messageInfo_NamedRole.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedRole proto.InternalMessageInfo + +func (m *NamedRoleBinding) Reset() { *m = NamedRoleBinding{} } +func (*NamedRoleBinding) ProtoMessage() {} +func (*NamedRoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{12} +} +func (m *NamedRoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamedRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamedRoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedRoleBinding.Merge(m, src) +} +func (m *NamedRoleBinding) XXX_Size() int { + return m.Size() +} +func (m *NamedRoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_NamedRoleBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedRoleBinding proto.InternalMessageInfo + +func (m *OptionalNames) Reset() { *m = OptionalNames{} } +func (*OptionalNames) ProtoMessage() {} +func (*OptionalNames) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{13} +} +func (m *OptionalNames) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OptionalNames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OptionalNames) XXX_Merge(src proto.Message) { + xxx_messageInfo_OptionalNames.Merge(m, src) +} +func (m *OptionalNames) XXX_Size() int { + return m.Size() +} +func (m *OptionalNames) XXX_DiscardUnknown() { + xxx_messageInfo_OptionalNames.DiscardUnknown(m) +} + +var xxx_messageInfo_OptionalNames proto.InternalMessageInfo + +func (m *OptionalScopes) Reset() { *m = OptionalScopes{} } +func (*OptionalScopes) ProtoMessage() {} +func (*OptionalScopes) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{14} +} +func (m *OptionalScopes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OptionalScopes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OptionalScopes) XXX_Merge(src proto.Message) { + xxx_messageInfo_OptionalScopes.Merge(m, src) +} +func (m *OptionalScopes) XXX_Size() int { + return m.Size() +} +func (m *OptionalScopes) XXX_DiscardUnknown() { + xxx_messageInfo_OptionalScopes.DiscardUnknown(m) +} + +var xxx_messageInfo_OptionalScopes proto.InternalMessageInfo + +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{15} +} +func (m *PolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyRule.Merge(m, src) +} +func (m *PolicyRule) XXX_Size() int { + return m.Size() +} +func (m *PolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_PolicyRule proto.InternalMessageInfo + +func (m *ResourceAccessReview) Reset() { *m = ResourceAccessReview{} } +func (*ResourceAccessReview) ProtoMessage() 
{} +func (*ResourceAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{16} +} +func (m *ResourceAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceAccessReview.Merge(m, src) +} +func (m *ResourceAccessReview) XXX_Size() int { + return m.Size() +} +func (m *ResourceAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceAccessReview proto.InternalMessageInfo + +func (m *ResourceAccessReviewResponse) Reset() { *m = ResourceAccessReviewResponse{} } +func (*ResourceAccessReviewResponse) ProtoMessage() {} +func (*ResourceAccessReviewResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{17} +} +func (m *ResourceAccessReviewResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceAccessReviewResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceAccessReviewResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceAccessReviewResponse.Merge(m, src) +} +func (m *ResourceAccessReviewResponse) XXX_Size() int { + return m.Size() +} +func (m *ResourceAccessReviewResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceAccessReviewResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceAccessReviewResponse proto.InternalMessageInfo + +func (m *Role) Reset() { *m = Role{} } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{18} +} +func (m *Role) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Role) XXX_Merge(src proto.Message) { + xxx_messageInfo_Role.Merge(m, src) +} +func (m *Role) XXX_Size() int { + return m.Size() +} +func (m *Role) XXX_DiscardUnknown() { + xxx_messageInfo_Role.DiscardUnknown(m) +} + +var xxx_messageInfo_Role proto.InternalMessageInfo + +func (m *RoleBinding) Reset() { *m = RoleBinding{} } +func (*RoleBinding) ProtoMessage() {} +func (*RoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{19} +} +func (m *RoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBinding.Merge(m, src) +} +func (m *RoleBinding) XXX_Size() int { + return m.Size() +} +func (m *RoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleBinding proto.InternalMessageInfo + +func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } +func (*RoleBindingList) ProtoMessage() {} +func (*RoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{20} +} +func 
(m *RoleBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBindingList.Merge(m, src) +} +func (m *RoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *RoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo + +func (m *RoleBindingRestriction) Reset() { *m = RoleBindingRestriction{} } +func (*RoleBindingRestriction) ProtoMessage() {} +func (*RoleBindingRestriction) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{21} +} +func (m *RoleBindingRestriction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBindingRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBindingRestriction) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBindingRestriction.Merge(m, src) +} +func (m *RoleBindingRestriction) XXX_Size() int { + return m.Size() +} +func (m *RoleBindingRestriction) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBindingRestriction.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleBindingRestriction proto.InternalMessageInfo + +func (m *RoleBindingRestrictionList) Reset() { *m = RoleBindingRestrictionList{} } +func (*RoleBindingRestrictionList) ProtoMessage() {} +func (*RoleBindingRestrictionList) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{22} +} +func (m *RoleBindingRestrictionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBindingRestrictionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBindingRestrictionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBindingRestrictionList.Merge(m, src) +} +func (m *RoleBindingRestrictionList) XXX_Size() int { + return m.Size() +} +func (m *RoleBindingRestrictionList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBindingRestrictionList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleBindingRestrictionList proto.InternalMessageInfo + +func (m *RoleBindingRestrictionSpec) Reset() { *m = RoleBindingRestrictionSpec{} } +func (*RoleBindingRestrictionSpec) ProtoMessage() {} +func (*RoleBindingRestrictionSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{23} +} +func (m *RoleBindingRestrictionSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBindingRestrictionSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBindingRestrictionSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBindingRestrictionSpec.Merge(m, src) +} +func (m *RoleBindingRestrictionSpec) XXX_Size() int { + return m.Size() +} +func (m *RoleBindingRestrictionSpec) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBindingRestrictionSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleBindingRestrictionSpec proto.InternalMessageInfo + +func (m 
*RoleList) Reset() { *m = RoleList{} } +func (*RoleList) ProtoMessage() {} +func (*RoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{24} +} +func (m *RoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleList.Merge(m, src) +} +func (m *RoleList) XXX_Size() int { + return m.Size() +} +func (m *RoleList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleList proto.InternalMessageInfo + +func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } +func (*SelfSubjectRulesReview) ProtoMessage() {} +func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{25} +} +func (m *SelfSubjectRulesReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectRulesReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectRulesReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectRulesReview.Merge(m, src) +} +func (m *SelfSubjectRulesReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectRulesReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectRulesReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectRulesReview proto.InternalMessageInfo + +func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} } +func (*SelfSubjectRulesReviewSpec) ProtoMessage() {} +func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{26} +} +func (m *SelfSubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectRulesReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectRulesReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectRulesReviewSpec.Merge(m, src) +} +func (m *SelfSubjectRulesReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectRulesReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectRulesReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectRulesReviewSpec proto.InternalMessageInfo + +func (m *ServiceAccountReference) Reset() { *m = ServiceAccountReference{} } +func (*ServiceAccountReference) ProtoMessage() {} +func (*ServiceAccountReference) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{27} +} +func (m *ServiceAccountReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccountReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceAccountReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccountReference.Merge(m, src) +} +func (m *ServiceAccountReference) XXX_Size() int { + return m.Size() +} +func (m *ServiceAccountReference) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccountReference.DiscardUnknown(m) 
+} + +var xxx_messageInfo_ServiceAccountReference proto.InternalMessageInfo + +func (m *ServiceAccountRestriction) Reset() { *m = ServiceAccountRestriction{} } +func (*ServiceAccountRestriction) ProtoMessage() {} +func (*ServiceAccountRestriction) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{28} +} +func (m *ServiceAccountRestriction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccountRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceAccountRestriction) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccountRestriction.Merge(m, src) +} +func (m *ServiceAccountRestriction) XXX_Size() int { + return m.Size() +} +func (m *ServiceAccountRestriction) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccountRestriction.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceAccountRestriction proto.InternalMessageInfo + +func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } +func (*SubjectAccessReview) ProtoMessage() {} +func (*SubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{29} +} +func (m *SubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReview.Merge(m, src) +} +func (m *SubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *SubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReview proto.InternalMessageInfo + +func (m *SubjectAccessReviewResponse) Reset() { *m = SubjectAccessReviewResponse{} } +func (*SubjectAccessReviewResponse) ProtoMessage() {} +func (*SubjectAccessReviewResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{30} +} +func (m *SubjectAccessReviewResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReviewResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReviewResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReviewResponse.Merge(m, src) +} +func (m *SubjectAccessReviewResponse) XXX_Size() int { + return m.Size() +} +func (m *SubjectAccessReviewResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReviewResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReviewResponse proto.InternalMessageInfo + +func (m *SubjectRulesReview) Reset() { *m = SubjectRulesReview{} } +func (*SubjectRulesReview) ProtoMessage() {} +func (*SubjectRulesReview) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{31} +} +func (m *SubjectRulesReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectRulesReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectRulesReview) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_SubjectRulesReview.Merge(m, src) +} +func (m *SubjectRulesReview) XXX_Size() int { + return m.Size() +} +func (m *SubjectRulesReview) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectRulesReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectRulesReview proto.InternalMessageInfo + +func (m *SubjectRulesReviewSpec) Reset() { *m = SubjectRulesReviewSpec{} } +func (*SubjectRulesReviewSpec) ProtoMessage() {} +func (*SubjectRulesReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{32} +} +func (m *SubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectRulesReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectRulesReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectRulesReviewSpec.Merge(m, src) +} +func (m *SubjectRulesReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SubjectRulesReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectRulesReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectRulesReviewSpec proto.InternalMessageInfo + +func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} } +func (*SubjectRulesReviewStatus) ProtoMessage() {} +func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{33} +} +func (m *SubjectRulesReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectRulesReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectRulesReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectRulesReviewStatus.Merge(m, src) +} +func (m *SubjectRulesReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SubjectRulesReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectRulesReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectRulesReviewStatus proto.InternalMessageInfo + +func (m *UserRestriction) Reset() { *m = UserRestriction{} } +func (*UserRestriction) ProtoMessage() {} +func (*UserRestriction) Descriptor() ([]byte, []int) { + return fileDescriptor_39b89822f939ca46, []int{34} +} +func (m *UserRestriction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserRestriction) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserRestriction.Merge(m, src) +} +func (m *UserRestriction) XXX_Size() int { + return m.Size() +} +func (m *UserRestriction) XXX_DiscardUnknown() { + xxx_messageInfo_UserRestriction.DiscardUnknown(m) +} + +var xxx_messageInfo_UserRestriction proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Action)(nil), "github.com.openshift.api.authorization.v1.Action") + proto.RegisterType((*ClusterRole)(nil), "github.com.openshift.api.authorization.v1.ClusterRole") + proto.RegisterType((*ClusterRoleBinding)(nil), "github.com.openshift.api.authorization.v1.ClusterRoleBinding") + proto.RegisterType((*ClusterRoleBindingList)(nil), "github.com.openshift.api.authorization.v1.ClusterRoleBindingList") + proto.RegisterType((*ClusterRoleList)(nil), 
"github.com.openshift.api.authorization.v1.ClusterRoleList") + proto.RegisterType((*GroupRestriction)(nil), "github.com.openshift.api.authorization.v1.GroupRestriction") + proto.RegisterType((*IsPersonalSubjectAccessReview)(nil), "github.com.openshift.api.authorization.v1.IsPersonalSubjectAccessReview") + proto.RegisterType((*LocalResourceAccessReview)(nil), "github.com.openshift.api.authorization.v1.LocalResourceAccessReview") + proto.RegisterType((*LocalSubjectAccessReview)(nil), "github.com.openshift.api.authorization.v1.LocalSubjectAccessReview") + proto.RegisterType((*NamedClusterRole)(nil), "github.com.openshift.api.authorization.v1.NamedClusterRole") + proto.RegisterType((*NamedClusterRoleBinding)(nil), "github.com.openshift.api.authorization.v1.NamedClusterRoleBinding") + proto.RegisterType((*NamedRole)(nil), "github.com.openshift.api.authorization.v1.NamedRole") + proto.RegisterType((*NamedRoleBinding)(nil), "github.com.openshift.api.authorization.v1.NamedRoleBinding") + proto.RegisterType((*OptionalNames)(nil), "github.com.openshift.api.authorization.v1.OptionalNames") + proto.RegisterType((*OptionalScopes)(nil), "github.com.openshift.api.authorization.v1.OptionalScopes") + proto.RegisterType((*PolicyRule)(nil), "github.com.openshift.api.authorization.v1.PolicyRule") + proto.RegisterType((*ResourceAccessReview)(nil), "github.com.openshift.api.authorization.v1.ResourceAccessReview") + proto.RegisterType((*ResourceAccessReviewResponse)(nil), "github.com.openshift.api.authorization.v1.ResourceAccessReviewResponse") + proto.RegisterType((*Role)(nil), "github.com.openshift.api.authorization.v1.Role") + proto.RegisterType((*RoleBinding)(nil), "github.com.openshift.api.authorization.v1.RoleBinding") + proto.RegisterType((*RoleBindingList)(nil), "github.com.openshift.api.authorization.v1.RoleBindingList") + proto.RegisterType((*RoleBindingRestriction)(nil), "github.com.openshift.api.authorization.v1.RoleBindingRestriction") + proto.RegisterType((*RoleBindingRestrictionList)(nil), "github.com.openshift.api.authorization.v1.RoleBindingRestrictionList") + proto.RegisterType((*RoleBindingRestrictionSpec)(nil), "github.com.openshift.api.authorization.v1.RoleBindingRestrictionSpec") + proto.RegisterType((*RoleList)(nil), "github.com.openshift.api.authorization.v1.RoleList") + proto.RegisterType((*SelfSubjectRulesReview)(nil), "github.com.openshift.api.authorization.v1.SelfSubjectRulesReview") + proto.RegisterType((*SelfSubjectRulesReviewSpec)(nil), "github.com.openshift.api.authorization.v1.SelfSubjectRulesReviewSpec") + proto.RegisterType((*ServiceAccountReference)(nil), "github.com.openshift.api.authorization.v1.ServiceAccountReference") + proto.RegisterType((*ServiceAccountRestriction)(nil), "github.com.openshift.api.authorization.v1.ServiceAccountRestriction") + proto.RegisterType((*SubjectAccessReview)(nil), "github.com.openshift.api.authorization.v1.SubjectAccessReview") + proto.RegisterType((*SubjectAccessReviewResponse)(nil), "github.com.openshift.api.authorization.v1.SubjectAccessReviewResponse") + proto.RegisterType((*SubjectRulesReview)(nil), "github.com.openshift.api.authorization.v1.SubjectRulesReview") + proto.RegisterType((*SubjectRulesReviewSpec)(nil), "github.com.openshift.api.authorization.v1.SubjectRulesReviewSpec") + proto.RegisterType((*SubjectRulesReviewStatus)(nil), "github.com.openshift.api.authorization.v1.SubjectRulesReviewStatus") + proto.RegisterType((*UserRestriction)(nil), "github.com.openshift.api.authorization.v1.UserRestriction") +} + +func init() { + 
+	proto.RegisterFile("github.com/openshift/api/authorization/v1/generated.proto", fileDescriptor_39b89822f939ca46)
+}
+
+var fileDescriptor_39b89822f939ca46 = []byte{
+	// 1821 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x19, 0xcd, 0x6f, 0x1b, 0x59,
+	0x3d, 0xcf, 0x76, 0x1c, 0xfb, 0xe7, 0x26, 0xce, 0xbe, 0x66, 0xdb, 0x69, 0xa0, 0xb6, 0x35, 0x20,
+	0x48, 0x05, 0x3b, 0x26, 0x01, 0x4a, 0xdb, 0x15, 0x5a, 0xd9, 0xdd, 0xa8, 0x8a, 0x54, 0x9a, 0xec,
+	0x0b, 0xbb, 0x5a, 0x2d, 0x1f, 0x62, 0x3c, 0x79, 0xb1, 0x87, 0x8c, 0x67, 0xac, 0x79, 0xe3, 0x94,
+	0x82, 0x90, 0x0a, 0x12, 0x07, 0x2e, 0x68, 0x2f, 0x20, 0x8e, 0x20, 0xfe, 0x00, 0xc4, 0x05, 0x09,
+	0x24, 0x38, 0x71, 0xe8, 0x81, 0x43, 0x25, 0x2e, 0x15, 0x42, 0x86, 0xba, 0x88, 0x03, 0x07, 0xfe,
+	0x06, 0xf4, 0xde, 0xbc, 0xf1, 0x7c, 0x78, 0xac, 0x78, 0x92, 0x26, 0x82, 0x55, 0x6f, 0x9e, 0xf7,
+	0xfb, 0xfe, 0x7c, 0xbf, 0xdf, 0x33, 0xdc, 0xee, 0x9a, 0x5e, 0x6f, 0xd8, 0xd1, 0x0c, 0xa7, 0xdf,
+	0x74, 0x06, 0xd4, 0x66, 0x3d, 0xf3, 0xd0, 0x6b, 0xea, 0x03, 0xb3, 0xa9, 0x0f, 0xbd, 0x9e, 0xe3,
+	0x9a, 0xdf, 0xd5, 0x3d, 0xd3, 0xb1, 0x9b, 0xc7, 0x9b, 0xcd, 0x2e, 0xb5, 0xa9, 0xab, 0x7b, 0xf4,
+	0x40, 0x1b, 0xb8, 0x8e, 0xe7, 0xe0, 0x1b, 0x21, 0xa9, 0x36, 0x21, 0xd5, 0xf4, 0x81, 0xa9, 0xc5,
+	0x48, 0xb5, 0xe3, 0xcd, 0xf5, 0x37, 0x22, 0x52, 0xba, 0x4e, 0xd7, 0x69, 0x0a, 0x0e, 0x9d, 0xe1,
+	0xa1, 0xf8, 0x12, 0x1f, 0xe2, 0x97, 0xcf, 0x79, 0x5d, 0x3d, 0xba, 0xc5, 0x34, 0xd3, 0x11, 0x6a,
+	0x18, 0x8e, 0x4b, 0x53, 0xa4, 0xc7, 0x70, 0xdc, 0x8e, 0x6e, 0xa4, 0xe1, 0x7c, 0x21, 0xc4, 0xe9,
+	0xeb, 0x46, 0xcf, 0xb4, 0xa9, 0xfb, 0xa8, 0x39, 0x38, 0xea, 0xf2, 0x03, 0xd6, 0xec, 0x53, 0x4f,
+	0x4f, 0xa3, 0x6a, 0xce, 0xa2, 0x72, 0x87, 0xb6, 0x67, 0xf6, 0xe9, 0x14, 0xc1, 0xcd, 0x93, 0x08,
+	0x98, 0xd1, 0xa3, 0x7d, 0x3d, 0x49, 0xa7, 0xfe, 0xa0, 0x00, 0xc5, 0x96, 0xc1, 0x7d, 0x84, 0x9b,
+	0x50, 0xb6, 0xf5, 0x3e, 0x65, 0x03, 0xdd, 0xa0, 0x0a, 0x6a, 0xa0, 0x8d, 0x72, 0xfb, 0xb5, 0x27,
+	0xa3, 0xfa, 0xc2, 0x78, 0x54, 0x2f, 0x3f, 0x08, 0x00, 0x24, 0xc4, 0xc1, 0x0d, 0x28, 0x1c, 0x53,
+	0xb7, 0xa3, 0xe4, 0x04, 0xee, 0x25, 0x89, 0x5b, 0x78, 0x8f, 0xba, 0x1d, 0x22, 0x20, 0xf8, 0x36,
+	0xac, 0xba, 0x94, 0x39, 0x43, 0xd7, 0xa0, 0xad, 0xbd, 0x9d, 0x7b, 0xae, 0x33, 0x1c, 0x28, 0x79,
+	0x81, 0xbd, 0x2c, 0xb1, 0x17, 0xc5, 0x21, 0x99, 0x42, 0xc3, 0x6f, 0x01, 0x8e, 0x9c, 0xbd, 0x47,
+	0x5d, 0x66, 0x3a, 0xb6, 0x52, 0x10, 0xc4, 0x55, 0x49, 0xbc, 0x24, 0x8f, 0x49, 0x0a, 0x2a, 0xfe,
+	0x2c, 0x94, 0x82, 0x53, 0x65, 0x51, 0x90, 0xad, 0x4a, 0xb2, 0x12, 0x91, 0xe7, 0x64, 0x82, 0x81,
+	0x6f, 0xc1, 0xa5, 0xe0, 0x37, 0xb7, 0x55, 0x29, 0x0a, 0x8a, 0x35, 0x49, 0x71, 0x89, 0x44, 0x60,
+	0x24, 0x86, 0xc9, 0xbd, 0x30, 0xd0, 0xbd, 0x9e, 0x52, 0x8a, 0x7b, 0x61, 0x4f, 0xf7, 0x7a, 0x44,
+	0x40, 0xf0, 0xdb, 0xb0, 0x6a, 0xb2, 0x07, 0x8e, 0x1d, 0x30, 0x79, 0x97, 0xdc, 0x57, 0xca, 0x0d,
+	0xb4, 0x51, 0x6a, 0x2b, 0x12, 0x7b, 0x75, 0x27, 0x01, 0x27, 0x53, 0x14, 0xf8, 0x7d, 0x58, 0x32,
+	0x1c, 0xdb, 0xa3, 0xb6, 0xa7, 0x2c, 0x35, 0xd0, 0x46, 0x65, 0xeb, 0x0d, 0xcd, 0x8f, 0xb9, 0x16,
+	0x8d, 0xb9, 0x36, 0x38, 0xea, 0x6a, 0x32, 0xe6, 0x1a, 0xd1, 0x1f, 0x6e, 0x7f, 0xc7, 0xa3, 0x36,
+	0xf7, 0x47, 0xe8, 0xb4, 0xbb, 0x3e, 0x17, 0x12, 0xb0, 0x53, 0x7f, 0x9d, 0x83, 0xca, 0x5d, 0x6b,
+	0xc8, 0x3c, 0xea, 0x12, 0xc7, 0xa2, 0xf8, 0x5b, 0x50, 0xe2, 0x79, 0x79, 0xa0, 0x7b, 0xba, 0xc8,
+	0x83, 0xca, 0xd6, 0xe7, 0x66, 0x8a, 0xe2, 0x59, 0xac, 0x71, 0x6c, 0xed, 0x78, 0x53, 0xdb, 0xed,
+	0x7c, 0x9b, 0x1a, 0xde, 0x57, 0xa8, 0xa7, 0xb7, 0xb1, 0x94, 0x06, 0xe1, 0x19, 0x99, 0x70, 0xc5,
+	0x1f, 0xc0, 0xa2, 0x3b, 0xb4, 0x28, 0x53, 0x72, 0x8d, 0xfc, 0x46, 0x65, 0xeb, 0x8b, 0xda, 0xdc,
+	0x65, 0xac, 0xed, 0x39, 0x96, 0x69, 0x3c, 0x22, 0x43, 0x8b, 0x86, 0x39, 0xc4, 0xbf, 0x18, 0xf1,
+	0x59, 0xe2, 0x0e, 0x54, 0xf5, 0x6e, 0xd7, 0xa5, 0x5d, 0x41, 0xc2, 0x41, 0x22, 0xe5, 0x2a, 0x5b,
+	0x9f, 0x88, 0x18, 0xa1, 0xf1, 0x72, 0xe5, 0xec, 0x5a, 0x71, 0xd4, 0xf6, 0xe5, 0xf1, 0xa8, 0x5e,
+	0x4d, 0x1c, 0x92, 0x24, 0x43, 0xf5, 0xdf, 0x79, 0xc0, 0x11, 0x8f, 0xb5, 0x4d, 0xfb, 0xc0, 0xb4,
+	0xbb, 0x17, 0xe0, 0x38, 0x0a, 0xe5, 0x21, 0xa3, 0xae, 0x28, 0x47, 0x51, 0x77, 0x95, 0xad, 0x5b,
+	0x19, 0x9c, 0xb7, 0x3b, 0xe0, 0xbf, 0x74, 0x4b, 0xd0, 0xb7, 0x97, 0x79, 0x65, 0xbf, 0x1b, 0xb0,
+	0x23, 0x21, 0x67, 0xdc, 0x03, 0xe8, 0xf2, 0x2a, 0xf4, 0xe5, 0xe4, 0xcf, 0x28, 0x67, 0x85, 0x9b,
+	0x73, 0x6f, 0xc2, 0x8f, 0x44, 0x78, 0xe3, 0x77, 0xa0, 0xc4, 0x86, 0xc2, 0x52, 0xa6, 0x14, 0x44,
+	0x32, 0xc4, 0xc2, 0xc4, 0x3b, 0x6f, 0xe8, 0x20, 0x42, 0x0f, 0xa9, 0x4b, 0x6d, 0x83, 0x86, 0xa5,
+	0xbc, 0x2f, 0x89, 0xc9, 0x84, 0x0d, 0x7e, 0x00, 0x4b, 0xae, 0x63, 0x51, 0x42, 0x0f, 0x45, 0xdd,
+	0xcf, 0xc9, 0x71, 0x52, 0x1e, 0xc4, 0xa7, 0x25, 0x01, 0x13, 0xf5, 0xaf, 0x08, 0xae, 0x4c, 0x07,
+	0xfb, 0xbe, 0xc9, 0x3c, 0xfc, 0xf5, 0xa9, 0x80, 0x6b, 0xf3, 0x05, 0x9c, 0x53, 0x8b, 0x70, 0x4f,
+	0x0c, 0x09, 0x4e, 0x22, 0xc1, 0xee, 0xc0, 0xa2, 0xe9, 0xd1, 0x7e, 0x50, 0x25, 0x5f, 0xce, 0x10,
+	0x80, 0x69, 0x7d, 0xc3, 0x6a, 0xd9, 0xe1, 0x3c, 0x89, 0xcf, 0x5a, 0xfd, 0x33, 0x82, 0x6a, 0x04,
+	0xf9, 0x02, 0xac, 0xfa, 0x5a, 0xdc, 0xaa, 0x9b, 0xa7, 0xb4, 0x2a, 0xdd, 0x9c, 0x9f, 0x21, 0x58,
+	0xf5, 0x6f, 0x14, 0xca, 0x3c, 0xd7, 0xf4, 0x2f, 0x36, 0x15, 0x8a, 0x22, 0xe3, 0x98, 0x82, 0x1a,
+	0xf9, 0x8d, 0x72, 0x1b, 0xc6, 0xa3, 0x7a, 0x51, 0x60, 0x31, 0x22, 0x21, 0xf8, 0x9b, 0x50, 0xb4,
+	0xf4, 0x0e, 0xb5, 0x02, 0xb5, 0x3e, 0x3f, 0xa7, 0xc5, 0x9c, 0x66, 0x9f, 0x5a, 0xd4, 0xf0, 0x1c,
+	0x37, 0xbc, 0x2e, 0x83, 0x13, 0x46, 0x24, 0x57, 0xb5, 0x0e, 0xd7, 0x77, 0xd8, 0x1e, 0x75, 0x19,
+	0x2f, 0x0b, 0x99, 0xb4, 0x2d, 0xc3, 0xa0, 0x8c, 0x11, 0x7a, 0x6c, 0xd2, 0x87, 0xaa, 0x05, 0xd7,
+	0xee, 0x3b, 0x86, 0x6e, 0x05, 0x2d, 0x3f, 0x0a, 0xc4, 0xbb, 0xc1, 0x25, 0x2d, 0xe3, 0xb1, 0x99,
+	0xc1, 0x69, 0x3e, 0x61, 0xbb, 0xc0, 0x75, 0x23, 0x92, 0x8d, 0xfa, 0xd3, 0x1c, 0x28, 0x42, 0x5c,
+	0x8a, 0x2a, 0x2f, 0x5d, 0x1a, 0xbf, 0x22, 0x79, 0x6f, 0x49, 0x0e, 0x0a, 0xbc, 0xf5, 0x10, 0x01,
+	0xc1, 0x9f, 0x9e, 0x84, 0x28, 0x2f, 0x42, 0x54, 0x1d, 0x8f, 0xea, 0x15, 0x3f, 0x44, 0xfb, 0x96,
+	0x69, 0xd0, 0x49, 0x9c, 0xbe, 0x01, 0x45, 0x66, 0x38, 0x03, 0xca, 0xc4, 0x28, 0x50, 0xd9, 0xba,
+	0x7d, 0x8a, 0xae, 0xb4, 0x2f, 0x18, 0xf8, 0x69, 0xe0, 0xff, 0x26, 0x92, 0xa9, 0xfa, 0x13, 0x04,
+	0xab, 0xbc, 0x31, 0x1d, 0x44, 0xef, 0xc3, 0x06, 0x14, 0xf8, 0xd0, 0x23, 0x67, 0xa2, 0x89, 0xfa,
+	0x62, 0x16, 0x10, 0x10, 0xfc, 0x3e, 0x14, 0x78, 0xb7, 0x90, 0x1d, 0xf9, 0xb4, 0x29, 0x3d, 0xe1,
+	0x2c, 0x5a, 0x90, 0xe0, 0xa8, 0xfe, 0x06, 0xc1, 0xd5, 0xa4, 0x42, 0xc1, 0x75, 0x73, 0xb2, 0x5e,
+	0x1e, 0x54, 0xdc, 0x90, 0x40, 0xaa, 0x77, 0xc6, 0x3e, 0x72, 0x59, 0xca, 0xa9, 0x44, 0x0e, 0x49,
+	0x54, 0x8c, 0xfa, 0x18, 0x81, 0x18, 0x18, 0x0f, 0xe6, 0xf4, 0xde, 0x3b, 0x31, 0xef, 0x35, 0x33,
+	0xa8, 0x37, 0xd3, 0x6d, 0xbf, 0x0a, 0xe2, 0x98, 0xcd, 0x5f, 0xfd, 0x34, 0x7f, 0xdd, 0xcc, 0xaa,
+	0xd0, 0xdc, 0x8e, 0xba, 0x03, 0xcb, 0xb1, 0x9b, 0x12, 0xd7, 0x83, 0xde, 0xe8, 0x37, 0xaa, 0x72,
+	0xb2, 0xbf, 0xdd, 0x29, 0xfd, 0xfc, 0x17, 0xf5, 0x85, 0xc7, 0x7f, 0x6b, 0x2c, 0xa8, 0x6f, 0xc2,
+	0x4a, 0x3c, 0x9f, 0xb3, 0x10, 0xff, 0x38, 0x0f, 0x10, 0x0e, 0x52, 0x9c, 0x92, 0x8f, 0xeb, 0x31,
+	0x4a, 0x3e, 0xc5, 0x33, 0xe2, 0x9f, 0xe3, 0x1f, 0x22, 0x78, 0x5d, 0xf7, 0x3c, 0xd7, 0xec, 0x0c,
+	0x3d, 0x1a, 0x69, 0xad, 0xc1, 0x0c, 0x92, 0x71, 0x14, 0xbd, 0x2e, 0x3d, 0xf3, 0x7a, 0x2b, 0x8d,
+	0x27, 0x49, 0x17, 0x85, 0x3f, 0x03, 0x65, 0x7d, 0x60, 0xde, 0x8b, 0xb6, 0x09, 0x31, 0xc1, 0x04,
+	0x2b, 0x03, 0x23, 0x21, 0x9c, 0x23, 0x07, 0x53, 0xba, 0x3f, 0x58, 0x48, 0xe4, 0xa0, 0xbd, 0x32,
+	0x12, 0xc2, 0xf1, 0x97, 0x60, 0x39, 0x3a, 0xd2, 0x33, 0x65, 0x51, 0x10, 0xbc, 0x36, 0x1e, 0xd5,
+	0x97, 0xa3, 0x93, 0x3f, 0x23, 0x71, 0x3c, 0xdc, 0x86, 0xaa, 0x1d, 0x9b, 0xd2, 0x99, 0x52, 0x14,
+	0xa4, 0xca, 0x78, 0x54, 0x5f, 0x8b, 0x0f, 0xf0, 0xb2, 0x91, 0x25, 0x09, 0xd4, 0x2e, 0xac, 0x5d,
+	0x4c, 0xcf, 0xff, 0x3b, 0x82, 0x8f, 0xa7, 0x49, 0x22, 0x94, 0x0d, 0x1c, 0x9b, 0xd1, 0xec, 0x0b,
+	0xe0, 0x27, 0x61, 0x91, 0x77, 0x6f, 0xff, 0xce, 0x2c, 0xfb, 0x73, 0x1e, 0x6f, 0xea, 0xd2, 0x54,
+	0x1f, 0x38, 0x7f, 0x6f, 0x7f, 0x0b, 0x56, 0xe8, 0xb1, 0x6e, 0x0d, 0xb9, 0xb6, 0xdb, 0xae, 0xeb,
+	0xb8, 0x72, 0xdd, 0xbb, 0x2a, 0x95, 0xa8, 0x6e, 0x73, 0xa8, 0x3e, 0x01, 0x93, 0x04, 0xba, 0xfa,
+	0x27, 0x04, 0x85, 0xff, 0xff, 0x0d, 0x46, 0x7d, 0x91, 0x87, 0xca, 0xab, 0xb5, 0xe2, 0xa3, 0xbe,
+	0x56, 0xf0, 0xc9, 0xfb, 0x62, 0xf7, 0x89, 0x33, 0x4c, 0xde, 0x27, 0x2f, 0x12, 0x2f, 0x10, 0x5c,
+	0x89, 0x5e, 0x74, 0x91, 0xf9, 0xfb, 0xfc, 0xf3, 0xb7, 0x0b, 0x05, 0x36, 0xa0, 0x86, 0x4c, 0xdd,
+	0xed, 0xd3, 0x19, 0x16, 0x51, 0x79, 0x7f, 0x40, 0x8d, 0x70, 0x40, 0xe0, 0x5f, 0x44, 0x08, 0x50,
+	0xc7, 0x08, 0xd6, 0xd3, 0x49, 0x2e, 0x20, 0x7e, 0x87, 0xf1, 0xf8, 0xb5, 0xce, 0x6c, 0xe6, 0x8c,
+	0x50, 0xfe, 0x3e, 0x3f, 0xcb, 0x48, 0xee, 0x09, 0xfc, 0x08, 0xaa, 0xbc, 0xa4, 0xdd, 0xf0, 0x58,
+	0xda, 0x7a, 0x27, 0x83, 0x42, 0x62, 0xf6, 0x8f, 0x68, 0x22, 0xde, 0x5d, 0x12, 0x87, 0x24, 0x29,
+	0x07, 0x7f, 0x1f, 0x56, 0x45, 0x91, 0x47, 0x65, 0xfb, 0x31, 0x7f, 0x33, 0x83, 0xec, 0xe4, 0x82,
+	0xd8, 0x5e, 0x1b, 0x8f, 0xea, 0x53, 0x6b, 0x23, 0x99, 0x12, 0x85, 0x7f, 0x89, 0xe0, 0x1a, 0xa3,
+	0xee, 0xb1, 0x69, 0x50, 0xdd, 0x30, 0x9c, 0xa1, 0xed, 0x45, 0x15, 0xf1, 0xfb, 0xd9, 0xdb, 0x19,
+	0x14, 0xd9, 0xf7, 0x79, 0xb5, 0x7c, 0x5e, 0x51, 0x8d, 0xae, 0x8f, 0x47, 0xf5, 0x6b, 0x33, 0xc1,
+	0x64, 0xb6, 0x16, 0xea, 0x1f, 0x11, 0x94, 0x2e, 0x68, 0x93, 0xff, 0x6a, 0x3c, 0x1f, 0x33, 0x0f,
+	0xee, 0xe9, 0xd9, 0xf7, 0x1f, 0x04, 0x57, 0xf6, 0xa9, 0x75, 0x28, 0x5b, 0xb0, 0x7f, 0x33, 0xfa,
+	0x23, 0x51, 0x50, 0xe6, 0x28, 0x73, 0x99, 0xa7, 0x33, 0x9c, 0x55, 0xe6, 0xf8, 0x08, 0x8a, 0xcc,
+	0xd3, 0xbd, 0x61, 0x70, 0x19, 0xde, 0xcd, 0x22, 0x6a, 0x5a, 0x8c, 0x60, 0xd5, 0x5e, 0x91, 0x82,
+	0x8a, 0xfe, 0x37, 0x91, 0x22, 0xd4, 0xef, 0xc1, 0xfa, 0x6c, 0xf5, 0x22, 0x0b, 0x2f, 0x3a, 0x8f,
+	0x85, 0xd7, 0x82, 0xab, 0xc9, 0x34, 0x93, 0x57, 0xd7, 0x1c, 0xeb, 0x52, 0x6c, 0x60, 0xcc, 0x9d,
+	0x3c, 0x30, 0xaa, 0x7f, 0x41, 0x30, 0x3b, 0xab, 0xf1, 0x8f, 0x10, 0x54, 0xe3, 0x89, 0xed, 0x6f,
+	0x24, 0x95, 0xad, 0xf6, 0x19, 0x8a, 0x2a, 0xb8, 0x89, 0x27, 0x53, 0x64, 0x1c, 0x81, 0x91, 0xa4,
+	0x4c, 0xac, 0x01, 0x4c, 0x54, 0x8e, 0xcd, 0xb6, 0x13, 0x9b, 0x18, 0x89, 0x60, 0xa8, 0x1f, 0xe6,
+	0xe0, 0xf2, 0xab, 0x77, 0x94, 0x58, 0x5a, 0xfd, 0x13, 0xc1, 0xc7, 0x52, 0x5c, 0x72, 0xfa, 0x55,
+	0xe3, 0x06, 0x2c, 0xe9, 0x96, 0xe5, 0x3c, 0xa4, 0x07, 0xc2, 0xfa, 0x52, 0x38, 0x58, 0xb5, 0xfc,
+	0x63, 0x12, 0xc0, 0xf1, 0xa7, 0xa0, 0xe8, 0x52, 0x9d, 0xc9, 0x8e, 0x5c, 0x0e, 0xeb, 0x8e, 0x88,
+	0x53, 0x22, 0xa1, 0xb8, 0x05, 0x55, 0x1a, 0x5f, 0x28, 0x4e, 0xda, 0x37, 0x92, 0xf8, 0xea, 0xbf,
+	0x10, 0xe0, 0x94, 0x3e, 0x65, 0xc4, 0xfa, 0x54, 0xeb, 0x6c, 0xcd, 0xe3, 0x7f, 0xa2, 0x47, 0xfd,
+	0x81, 0x37, 0xe5, 0xf4, 0x06, 0x15, 0x24, 0x25, 0x9a,
+	0x99, 0x94, 0xe1, 0xfb, 0x6b, 0x6e, 0xe6,
+	0xfb, 0x6b, 0x98, 0x8f, 0xf9, 0xf3, 0xc8, 0xc7, 0xdf, 0x21, 0x50, 0x66, 0x19, 0x1d, 0xee, 0x72,
+	0xe8, 0xe5, 0xff, 0x1b, 0x95, 0x92, 0x64, 0xb9, 0x8c, 0x49, 0xf6, 0x5b, 0x04, 0xc9, 0xc9, 0x08,
+	0xd7, 0x83, 0xcd, 0x3b, 0xf2, 0x62, 0x23, 0x36, 0xef, 0x60, 0xe9, 0x9e, 0xc7, 0xe7, 0xe1, 0x9b,
+	0x77, 0xfe, 0x3c, 0xde, 0xbc, 0xdb, 0xbb, 0x4f, 0x9e, 0xd7, 0x16, 0x9e, 0x3e, 0xaf, 0x2d, 0x3c,
+	0x7b, 0x5e, 0x5b, 0x78, 0x3c, 0xae, 0xa1, 0x27, 0xe3, 0x1a, 0x7a, 0x3a, 0xae, 0xa1, 0x67, 0xe3,
+	0x1a, 0xfa, 0xc7, 0xb8, 0x86, 0x3e, 0x7c, 0x51, 0x5b, 0xf8, 0xe0, 0xc6, 0xdc, 0xff, 0xfe, 0xff,
+	0x37, 0x00, 0x00, 0xff, 0xff, 0xac, 0xa0, 0x30, 0xab, 0x29, 0x20, 0x00, 0x00,
+}
+
+func (m *Action) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Action) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Action) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.IsNonResourceURL {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x48
+	i -= len(m.Path)
+	copy(dAtA[i:], m.Path)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path)))
+	i--
+	dAtA[i] = 0x42
+	{
+		size, err := m.Content.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x3a
+	i -= len(m.ResourceName)
+	copy(dAtA[i:], m.ResourceName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceName)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.Resource)
+	copy(dAtA[i:], m.Resource)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.Version)
+	copy(dAtA[i:], m.Version)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Group)
+	copy(dAtA[i:], m.Group)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Verb)
+	copy(dAtA[i:], m.Verb)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Namespace)
+	copy(dAtA[i:], m.Namespace)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ClusterRole) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterRole) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.AggregationRule != nil {
+		{
+			size, err := m.AggregationRule.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if len(m.Rules) > 0 {
+		for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x2a
+	if len(m.Subjects) > 0 {
+		for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	if m.GroupNames != nil {
+		{
+			size, err := m.GroupNames.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.UserNames != nil {
+		{
+			size, err := m.UserNames.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterRoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ClusterRoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *GroupRestriction) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GroupRestriction) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *GroupRestriction) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Selectors) > 0 {
+		for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Groups) > 0 {
+		for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Groups[iNdEx])
+			copy(dAtA[i:], m.Groups[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *IsPersonalSubjectAccessReview) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *IsPersonalSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *IsPersonalSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	return len(dAtA) - i, nil
+}
+
+func (m *LocalResourceAccessReview) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LocalResourceAccessReview) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LocalResourceAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Action.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LocalSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LocalSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Scopes != nil {
+		{
+			size, err := m.Scopes.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if len(m.GroupsSlice) > 0 {
+		for iNdEx := len(m.GroupsSlice) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.GroupsSlice[iNdEx])
+			copy(dAtA[i:], m.GroupsSlice[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupsSlice[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	i -= len(m.User)
+	copy(dAtA[i:], m.User)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.User)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.Action.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *NamedClusterRole) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NamedClusterRole) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NamedClusterRole) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Role.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *NamedClusterRoleBinding) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NamedClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NamedClusterRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.RoleBinding.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *NamedRole) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NamedRole) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NamedRole) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Role.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *NamedRoleBinding) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NamedRoleBinding) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *NamedRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.RoleBinding.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m OptionalNames) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m OptionalNames) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m OptionalNames) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m) > 0 {
+		for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m[iNdEx])
+			copy(dAtA[i:], m[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m OptionalScopes) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m OptionalScopes) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m OptionalScopes) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m) > 0 {
+		for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m[iNdEx])
+			copy(dAtA[i:], m[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *PolicyRule) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.NonResourceURLsSlice) > 0 {
+		for iNdEx := len(m.NonResourceURLsSlice) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.NonResourceURLsSlice[iNdEx])
+			copy(dAtA[i:], m.NonResourceURLsSlice[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLsSlice[iNdEx])))
+			i--
+			dAtA[i] = 0x32
+		}
+	}
+	if len(m.ResourceNames) > 0 {
+		for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.ResourceNames[iNdEx])
+			copy(dAtA[i:], m.ResourceNames[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx])))
+			i--
+			dAtA[i] = 0x2a
+		}
+	}
+	if len(m.Resources) > 0 {
+		for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Resources[iNdEx])
+			copy(dAtA[i:], m.Resources[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx])))
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	if len(m.APIGroups) > 0 {
+		for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.APIGroups[iNdEx])
+			copy(dAtA[i:], m.APIGroups[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	{
+		size, err := m.AttributeRestrictions.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	if len(m.Verbs) > 0 {
+		for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Verbs[iNdEx])
+			copy(dAtA[i:], m.Verbs[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ResourceAccessReview) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourceAccessReview) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Action.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ResourceAccessReviewResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourceAccessReviewResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResourceAccessReviewResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.EvaluationError)
+	copy(dAtA[i:], m.EvaluationError)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError)))
+	i--
+	dAtA[i] = 0x22
+	if len(m.GroupsSlice) > 0 {
+		for iNdEx := len(m.GroupsSlice) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.GroupsSlice[iNdEx])
+			copy(dAtA[i:], m.GroupsSlice[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupsSlice[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.UsersSlice) > 0 {
+		for iNdEx := len(m.UsersSlice) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.UsersSlice[iNdEx])
+			copy(dAtA[i:], m.UsersSlice[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.UsersSlice[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.Namespace)
+	copy(dAtA[i:], m.Namespace)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Role) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Role) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Rules) > 0 {
+		for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *RoleBinding) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x2a
+	if len(m.Subjects) > 0 {
+		for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	if m.GroupNames != nil {
+		{
+			size, err := m.GroupNames.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.UserNames != nil {
+		{
+			size, err := m.UserNames.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *RoleBindingList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *RoleBindingRestriction) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RoleBindingRestriction) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RoleBindingRestriction) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *RoleBindingRestrictionList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RoleBindingRestrictionList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RoleBindingRestrictionList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err :=
+					m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *RoleBindingRestrictionSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RoleBindingRestrictionSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RoleBindingRestrictionSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.ServiceAccountRestriction != nil {
+		{
+			size, err := m.ServiceAccountRestriction.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.GroupRestriction != nil {
+		{
+			size, err := m.GroupRestriction.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.UserRestriction != nil {
+		{
+			size, err := m.UserRestriction.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *RoleList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RoleList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SelfSubjectRulesReview) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SelfSubjectRulesReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SelfSubjectRulesReviewSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SelfSubjectRulesReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Scopes != nil {
+		{
+			size, err := m.Scopes.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ServiceAccountReference) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ServiceAccountReference) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ServiceAccountReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Namespace)
+	copy(dAtA[i:], m.Namespace)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *ServiceAccountRestriction) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ServiceAccountRestriction) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ServiceAccountRestriction) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Namespaces) > 0 {
+		for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Namespaces[iNdEx])
+			copy(dAtA[i:], m.Namespaces[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.ServiceAccounts) > 0 {
+		for iNdEx := len(m.ServiceAccounts) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.ServiceAccounts[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SubjectAccessReview) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Scopes != nil {
+		{
+			size, err := m.Scopes.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if len(m.GroupsSlice) > 0 {
+		for iNdEx := len(m.GroupsSlice) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.GroupsSlice[iNdEx])
+			copy(dAtA[i:], m.GroupsSlice[iNdEx])
+			i =
+				encodeVarintGenerated(dAtA, i, uint64(len(m.GroupsSlice[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	i -= len(m.User)
+	copy(dAtA[i:], m.User)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.User)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.Action.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SubjectAccessReviewResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SubjectAccessReviewResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SubjectAccessReviewResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.EvaluationError)
+	copy(dAtA[i:], m.EvaluationError)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Reason)
+	copy(dAtA[i:], m.Reason)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+	i--
+	dAtA[i] = 0x1a
+	i--
+	if m.Allowed {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x10
+	i -= len(m.Namespace)
+	copy(dAtA[i:], m.Namespace)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SubjectRulesReview) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SubjectRulesReview) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SubjectRulesReview) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SubjectRulesReviewSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SubjectRulesReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.Scopes != nil {
+		{
+			size, err := m.Scopes.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if len(m.Groups) > 0 {
+		for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Groups[iNdEx])
+			copy(dAtA[i:], m.Groups[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i -= len(m.User)
+	copy(dAtA[i:], m.User)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.User)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SubjectRulesReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.EvaluationError)
+	copy(dAtA[i:], m.EvaluationError)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError)))
+	i--
+	dAtA[i] = 0x12
+	if len(m.Rules) > 0 {
+		for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *UserRestriction) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *UserRestriction) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UserRestriction) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Selectors) > 0 {
+		for iNdEx := len(m.Selectors) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Selectors[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Groups) > 0 {
+		for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Groups[iNdEx])
+			copy(dAtA[i:], m.Groups[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Users) > 0 {
+		for iNdEx := len(m.Users) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Users[iNdEx])
+			copy(dAtA[i:], m.Users[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Users[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+	offset -= sovGenerated(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *Action) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Namespace)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Verb)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Group)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Version)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Resource)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.ResourceName)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Content.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Path)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	return n
+}
+
+func (m *ClusterRole) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Rules) > 0 {
+		for _, e := range m.Rules {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.AggregationRule != nil {
+		l = m.AggregationRule.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *ClusterRoleBinding) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l =
+		m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.UserNames != nil {
+		l = m.UserNames.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.GroupNames != nil {
+		l = m.GroupNames.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.Subjects) > 0 {
+		for _, e := range m.Subjects {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = m.RoleRef.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ClusterRoleBindingList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ClusterRoleList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *GroupRestriction) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Groups) > 0 {
+		for _, s := range m.Groups {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Selectors) > 0 {
+		for _, e := range m.Selectors {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *IsPersonalSubjectAccessReview) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	return n
+}
+
+func (m *LocalResourceAccessReview) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.Action.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *LocalSubjectAccessReview) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.Action.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.User)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.GroupsSlice) > 0 {
+		for _, s := range m.GroupsSlice {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.Scopes != nil {
+		l = m.Scopes.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *NamedClusterRole) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Role.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *NamedClusterRoleBinding) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.RoleBinding.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *NamedRole) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Role.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *NamedRoleBinding) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.RoleBinding.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m OptionalNames) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m) > 0 {
+		for _, s := range m {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m OptionalScopes) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m) > 0 {
+		for _, s := range m {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *PolicyRule) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Verbs) > 0 {
+		for _, s := range m.Verbs {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = m.AttributeRestrictions.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.APIGroups) > 0 {
+		for _, s := range m.APIGroups {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Resources) > 0 {
+		for _, s := range m.Resources {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.ResourceNames) > 0 {
+		for _, s := range m.ResourceNames {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.NonResourceURLsSlice) > 0 {
+		for _, s := range m.NonResourceURLsSlice {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ResourceAccessReview) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.Action.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ResourceAccessReviewResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Namespace)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.UsersSlice) > 0 {
+		for _, s := range m.UsersSlice {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.GroupsSlice) > 0 {
+		for _, s := range m.GroupsSlice {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.EvaluationError)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *Role) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Rules) > 0 {
+		for _, e := range m.Rules {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *RoleBinding) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if m.UserNames != nil {
+		l = m.UserNames.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.GroupNames != nil {
+		l = m.GroupNames.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if len(m.Subjects) > 0 {
+		for _, e := range m.Subjects {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = m.RoleRef.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *RoleBindingList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *RoleBindingRestriction) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ObjectMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *RoleBindingRestrictionList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *RoleBindingRestrictionSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.UserRestriction != nil {
+		l = m.UserRestriction.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.GroupRestriction != nil {
+		l = m.GroupRestriction.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	if m.ServiceAccountRestriction != nil {
+		l = m.ServiceAccountRestriction.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *RoleList) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ListMeta.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Items) > 0 {
+		for _, e := range m.Items {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *SelfSubjectRulesReview) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Status.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *SelfSubjectRulesReviewSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Scopes != nil {
+		l = m.Scopes.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *ServiceAccountReference) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.Namespace)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *ServiceAccountRestriction) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.ServiceAccounts) > 0 {
+		for _, e := range m.ServiceAccounts {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Namespaces) > 0 {
+		for _, s := range m.Namespaces {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *SubjectAccessReview) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.Action.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.User)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.GroupsSlice) > 0 {
+		for _, s := range m.GroupsSlice {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.Scopes != nil {
+		l = m.Scopes.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *SubjectAccessReviewResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Namespace)
+	n += 1 + l + sovGenerated(uint64(l))
+	n += 2
+	l = len(m.Reason)
+	n += 1 + l + sovGenerated(uint64(l))
+	l = len(m.EvaluationError)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *SubjectRulesReview) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.Spec.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	l = m.Status.Size()
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *SubjectRulesReviewSpec) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.User)
+	n += 1 + l + sovGenerated(uint64(l))
+	if len(m.Groups) > 0 {
+		for _, s := range m.Groups {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if m.Scopes != nil {
+		l = m.Scopes.Size()
+		n += 1 + l + sovGenerated(uint64(l))
+	}
+	return n
+}
+
+func (m *SubjectRulesReviewStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Rules) > 0 {
+		for _, e := range m.Rules {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	l = len(m.EvaluationError)
+	n += 1 + l + sovGenerated(uint64(l))
+	return n
+}
+
+func (m *UserRestriction) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Users) > 0 {
+		for _, s := range m.Users {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Groups) > 0 {
+		for _, s := range m.Groups {
+			l = len(s)
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	if len(m.Selectors) > 0 {
+		for _, e := range m.Selectors {
+			l = e.Size()
+			n += 1 + l + sovGenerated(uint64(l))
+		}
+	}
+	return n
+}
+
+func sovGenerated(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozGenerated(x uint64) (n int) {
+	return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *Action) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Action{`,
+		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+		`Verb:` + fmt.Sprintf("%v", this.Verb) + `,`,
+		`Group:` + fmt.Sprintf("%v", this.Group) + `,`,
+		`Version:` + fmt.Sprintf("%v", this.Version) + `,`,
+		`Resource:` + fmt.Sprintf("%v", this.Resource) + `,`,
+		`ResourceName:` + fmt.Sprintf("%v", this.ResourceName) + `,`,
+		`Content:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Content), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
+		`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+		`IsNonResourceURL:` + fmt.Sprintf("%v", this.IsNonResourceURL) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ClusterRole) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForRules := "[]PolicyRule{"
+	for _, f := range this.Rules {
+		repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForRules += "}"
+	s := strings.Join([]string{`&ClusterRole{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`Rules:` + repeatedStringForRules + `,`,
+		`AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "v11.AggregationRule", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ClusterRoleBinding) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForSubjects := "[]ObjectReference{"
+	for _, f := range this.Subjects {
+		repeatedStringForSubjects += fmt.Sprintf("%v", f) + ","
+	}
+	repeatedStringForSubjects += "}"
+	s := strings.Join([]string{`&ClusterRoleBinding{`,
+		`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
+		`UserNames:` + strings.Replace(fmt.Sprintf("%v", this.UserNames), "OptionalNames", "OptionalNames", 1) + `,`,
+		`GroupNames:` + strings.Replace(fmt.Sprintf("%v", this.GroupNames), "OptionalNames", "OptionalNames", 1) + `,`,
+		`Subjects:` + repeatedStringForSubjects + `,`,
+		`RoleRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RoleRef), "ObjectReference", "v12.ObjectReference", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ClusterRoleBindingList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]ClusterRoleBinding{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&ClusterRoleBindingList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ClusterRoleList) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForItems := "[]ClusterRole{"
+	for _, f := range this.Items {
+		repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + ","
+	}
+	repeatedStringForItems += "}"
+	s := strings.Join([]string{`&ClusterRoleList{`,
+		`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
+		`Items:` + repeatedStringForItems + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *GroupRestriction) String() string {
+	if this == nil {
+		return "nil"
+	}
+	repeatedStringForSelectors := "[]LabelSelector{"
+	for _, f := range this.Selectors {
+		repeatedStringForSelectors += fmt.Sprintf("%v", f) + ","
+	}
+	repeatedStringForSelectors += "}"
+	s := strings.Join([]string{`&GroupRestriction{`,
+		`Groups:` + fmt.Sprintf("%v", this.Groups) + `,`,
+		`Selectors:` + repeatedStringForSelectors + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *IsPersonalSubjectAccessReview) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&IsPersonalSubjectAccessReview{`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *LocalResourceAccessReview) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&LocalResourceAccessReview{`,
+		`Action:` + strings.Replace(strings.Replace(this.Action.String(), "Action", "Action", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *LocalSubjectAccessReview) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&LocalSubjectAccessReview{`,
+		`Action:` + strings.Replace(strings.Replace(this.Action.String(), "Action", "Action", 1), `&`, ``, 1) + `,`,
+		`User:` + fmt.Sprintf("%v", this.User) + `,`,
+		`GroupsSlice:` + fmt.Sprintf("%v", this.GroupsSlice) + `,`,
+		`Scopes:` + strings.Replace(fmt.Sprintf("%v", this.Scopes), "OptionalScopes", "OptionalScopes", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NamedClusterRole) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&NamedClusterRole{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Role:` + strings.Replace(strings.Replace(this.Role.String(), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NamedClusterRoleBinding) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&NamedClusterRoleBinding{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`RoleBinding:` + strings.Replace(strings.Replace(this.RoleBinding.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NamedRole) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&NamedRole{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Role:` + strings.Replace(strings.Replace(this.Role.String(), "Role", "Role", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NamedRoleBinding) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&NamedRoleBinding{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`RoleBinding:` + strings.Replace(strings.Replace(this.RoleBinding.String(), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PolicyRule) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PolicyRule{`,
+		`Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`,
+		`AttributeRestrictions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AttributeRestrictions), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`,
+		`APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`,
+		`Resources:` + fmt.Sprintf("%v", this.Resources) + `,`,
+		`ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`,
+
`NonResourceURLsSlice:` + fmt.Sprintf("%v", this.NonResourceURLsSlice) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceAccessReview{`, + `Action:` + strings.Replace(strings.Replace(this.Action.String(), "Action", "Action", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceAccessReviewResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceAccessReviewResponse{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `UsersSlice:` + fmt.Sprintf("%v", this.UsersSlice) + `,`, + `GroupsSlice:` + fmt.Sprintf("%v", this.GroupsSlice) + `,`, + `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, + `}`, + }, "") + return s +} +func (this *Role) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&Role{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `}`, + }, "") + return s +} +func (this *RoleBinding) String() string { + if this == nil { + return "nil" + } + repeatedStringForSubjects := "[]ObjectReference{" + for _, f := range this.Subjects { + repeatedStringForSubjects += fmt.Sprintf("%v", f) + "," + } + repeatedStringForSubjects += "}" + s := strings.Join([]string{`&RoleBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `UserNames:` + strings.Replace(fmt.Sprintf("%v", this.UserNames), "OptionalNames", "OptionalNames", 1) + `,`, + `GroupNames:` + strings.Replace(fmt.Sprintf("%v", this.GroupNames), "OptionalNames", "OptionalNames", 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, + `RoleRef:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RoleRef), "ObjectReference", "v12.ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleBindingList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]RoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&RoleBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *RoleBindingRestriction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleBindingRestriction{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "RoleBindingRestrictionSpec", "RoleBindingRestrictionSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleBindingRestrictionList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]RoleBindingRestriction{" + for _, f := range this.Items { + 
repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RoleBindingRestriction", "RoleBindingRestriction", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&RoleBindingRestrictionList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *RoleBindingRestrictionSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleBindingRestrictionSpec{`, + `UserRestriction:` + strings.Replace(this.UserRestriction.String(), "UserRestriction", "UserRestriction", 1) + `,`, + `GroupRestriction:` + strings.Replace(this.GroupRestriction.String(), "GroupRestriction", "GroupRestriction", 1) + `,`, + `ServiceAccountRestriction:` + strings.Replace(this.ServiceAccountRestriction.String(), "ServiceAccountRestriction", "ServiceAccountRestriction", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Role{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Role", "Role", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&RoleList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectRulesReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectRulesReview{`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectRulesReviewSpec", "SelfSubjectRulesReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectRulesReviewStatus", "SubjectRulesReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectRulesReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectRulesReviewSpec{`, + `Scopes:` + strings.Replace(fmt.Sprintf("%v", this.Scopes), "OptionalScopes", "OptionalScopes", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceAccountReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountRestriction) String() string { + if this == nil { + return "nil" + } + repeatedStringForServiceAccounts := "[]ServiceAccountReference{" + for _, f := range this.ServiceAccounts { + repeatedStringForServiceAccounts += strings.Replace(strings.Replace(f.String(), "ServiceAccountReference", "ServiceAccountReference", 1), `&`, ``, 1) + "," + } + repeatedStringForServiceAccounts += "}" + s := strings.Join([]string{`&ServiceAccountRestriction{`, + `ServiceAccounts:` + repeatedStringForServiceAccounts + `,`, + `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectAccessReview{`, + `Action:` + strings.Replace(strings.Replace(this.Action.String(), "Action", "Action", 1), `&`, ``, 1) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + 
`,`, + `GroupsSlice:` + fmt.Sprintf("%v", this.GroupsSlice) + `,`, + `Scopes:` + strings.Replace(fmt.Sprintf("%v", this.Scopes), "OptionalScopes", "OptionalScopes", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReviewResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectAccessReviewResponse{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectRulesReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectRulesReview{`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectRulesReviewSpec", "SubjectRulesReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectRulesReviewStatus", "SubjectRulesReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectRulesReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectRulesReviewSpec{`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Scopes:` + strings.Replace(fmt.Sprintf("%v", this.Scopes), "OptionalScopes", "OptionalScopes", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectRulesReviewStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&SubjectRulesReviewStatus{`, + `Rules:` + repeatedStringForRules + `,`, + `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, + `}`, + }, "") + return s +} +func (this *UserRestriction) String() string { + if this == nil { + return "nil" + } + repeatedStringForSelectors := "[]LabelSelector{" + for _, f := range this.Selectors { + repeatedStringForSelectors += fmt.Sprintf("%v", f) + "," + } + repeatedStringForSelectors += "}" + s := strings.Join([]string{`&UserRestriction{`, + `Users:` + fmt.Sprintf("%v", this.Users) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Selectors:` + repeatedStringForSelectors + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Action) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Action: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Action: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verb = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Content", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Content.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsNonResourceURL", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsNonResourceURL = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRole) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRole: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRole: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationRule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AggregationRule == nil { + m.AggregationRule = &v11.AggregationRule{} + } + if err := m.AggregationRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserNames", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UserNames == nil { + m.UserNames = OptionalNames{} + } + if err := m.UserNames.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupNames", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GroupNames == nil { + m.GroupNames = OptionalNames{} + } + if err := m.GroupNames.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, v12.ObjectReference{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterRoleBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterRole{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupRestriction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupRestriction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupRestriction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selectors = append(m.Selectors, v1.LabelSelector{}) + if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + 
} + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IsPersonalSubjectAccessReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IsPersonalSubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IsPersonalSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LocalResourceAccessReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LocalResourceAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LocalResourceAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LocalSubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LocalSubjectAccessReview: illegal tag %d (wire 
type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupsSlice", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GroupsSlice = append(m.GroupsSlice, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scopes == nil { + m.Scopes = OptionalScopes{} + } + if err := m.Scopes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedClusterRole) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedClusterRole: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedClusterRole: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Role.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedClusterRoleBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedClusterRoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedClusterRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
RoleBinding", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleBinding.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedRole) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedRole: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedRole: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Role.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedRoleBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedRoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleBinding", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleBinding.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OptionalNames) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OptionalNames: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OptionalNames: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + 
default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OptionalScopes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OptionalScopes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OptionalScopes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
AttributeRestrictions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.AttributeRestrictions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLsSlice", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLsSlice = append(m.NonResourceURLsSlice, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = 
preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceAccessReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceAccessReviewResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceAccessReviewResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceAccessReviewResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field UsersSlice", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UsersSlice = append(m.UsersSlice, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupsSlice", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GroupsSlice = append(m.GroupsSlice, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EvaluationError = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Role) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Role: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { 
+ return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserNames", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UserNames == nil { + m.UserNames = OptionalNames{} + } + if err := m.UserNames.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupNames", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GroupNames == nil { + m.GroupNames = OptionalNames{} + } + if err := m.GroupNames.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, v12.ObjectReference{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBindingList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RoleBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBindingRestriction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBindingRestriction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBindingRestriction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + 
return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBindingRestrictionList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBindingRestrictionList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBindingRestrictionList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RoleBindingRestriction{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBindingRestrictionSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBindingRestrictionSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBindingRestrictionSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserRestriction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UserRestriction == nil { + m.UserRestriction = &UserRestriction{} + } + if err := m.UserRestriction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupRestriction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GroupRestriction == nil { + m.GroupRestriction = &GroupRestriction{} + } + if err := m.GroupRestriction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountRestriction", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ServiceAccountRestriction == nil { + m.ServiceAccountRestriction = &ServiceAccountRestriction{} + } + if err := m.ServiceAccountRestriction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Role{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectRulesReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectRulesReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 
0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectRulesReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectRulesReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scopes == nil { + m.Scopes = OptionalScopes{} + } + if err := m.Scopes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccountReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccountReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccountReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccountRestriction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccountRestriction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccountRestriction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceAccounts = append(m.ServiceAccounts, ServiceAccountReference{}) + if err := m.ServiceAccounts[len(m.ServiceAccounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { + 
l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupsSlice", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GroupsSlice = append(m.GroupsSlice, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scopes == nil { + m.Scopes = OptionalScopes{} + } + if err := m.Scopes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return 
err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReviewResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReviewResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReviewResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Allowed = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EvaluationError = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return 
err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectRulesReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectRulesReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectRulesReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectRulesReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectRulesReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scopes == nil { + m.Scopes = OptionalScopes{} + } + if err := m.Scopes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectRulesReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectRulesReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + 
} + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EvaluationError = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserRestriction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserRestriction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserRestriction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Users = append(m.Users, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selectors = append(m.Selectors, v1.LabelSelector{}) + if err := m.Selectors[len(m.Selectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/openshift/api/authorization/v1/generated.proto b/vendor/github.com/openshift/api/authorization/v1/generated.proto new file mode 100644 index 000000000..7d0abe95c --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/generated.proto @@ -0,0 +1,557 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package github.com.openshift.api.authorization.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/api/rbac/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/openshift/api/authorization/v1"; + +// Action describes a request to the API server +message Action { + // Namespace is the namespace of the action being requested. 
Currently, there is no distinction between no namespace and all namespaces + optional string namespace = 1; + + // Verb is one of: get, list, watch, create, update, delete + optional string verb = 2; + + // Group is the API group of the resource + // Serialized as resourceAPIGroup to avoid confusion with the 'groups' field when inlined + optional string resourceAPIGroup = 3; + + // Version is the API version of the resource + // Serialized as resourceAPIVersion to avoid confusion with TypeMeta.apiVersion and ObjectMeta.resourceVersion when inlined + optional string resourceAPIVersion = 4; + + // Resource is one of the existing resource types + optional string resource = 5; + + // ResourceName is the name of the resource being requested for a "get" or deleted for a "delete" + optional string resourceName = 6; + + // Path is the path of a non resource URL + optional string path = 8; + + // IsNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy) + optional bool isNonResourceURL = 9; + + // Content is the actual content of the request for create and update + // +kubebuilder:pruning:PreserveUnknownFields + optional k8s.io.apimachinery.pkg.runtime.RawExtension content = 7; +} + +// ClusterRole is a logical grouping of PolicyRules that can be referenced as a unit by ClusterRoleBindings. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ClusterRole { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Rules holds all the PolicyRules for this ClusterRole + repeated PolicyRule rules = 2; + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + optional k8s.io.api.rbac.v1.AggregationRule aggregationRule = 3; +} + +// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference any ClusterRole in the same namespace or in the global namespace. +// It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in. +// ClusterRoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces). +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ClusterRoleBinding { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // UserNames holds all the usernames directly bound to the role. + // This field should only be specified when supporting legacy clients and servers. + // See Subjects for further details. + // +k8s:conversion-gen=false + // +optional + optional OptionalNames userNames = 2; + + // GroupNames holds all the groups directly bound to the role. + // This field should only be specified when supporting legacy clients and servers. + // See Subjects for further details. 
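
The generated Unmarshal and skipGenerated functions earlier in this file all share one core pattern: base-128 varint decoding, where each byte contributes seven payload bits and the high bit signals continuation. A minimal standalone sketch of that loop, for orientation only (not part of the vendored file):

    package main

    import (
    	"errors"
    	"fmt"
    )

    // decodeVarint mirrors the hand-rolled loop in the generated code:
    // seven payload bits per byte, high bit set means "more bytes follow".
    func decodeVarint(data []byte) (value uint64, n int, err error) {
    	for shift := uint(0); ; shift += 7 {
    		if shift >= 64 {
    			return 0, 0, errors.New("integer overflow")
    		}
    		if n >= len(data) {
    			return 0, 0, errors.New("unexpected EOF")
    		}
    		b := data[n]
    		n++
    		value |= uint64(b&0x7F) << shift
    		if b < 0x80 {
    			return value, n, nil
    		}
    	}
    }

    func main() {
    	// 0xAC 0x02 encodes 300: 0x2C | (0x02 << 7).
    	v, n, _ := decodeVarint([]byte{0xAC, 0x02})
    	fmt.Println(v, n) // 300 2
    }
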
+ // +k8s:conversion-gen=false + // +optional + optional OptionalNames groupNames = 3; + + // Subjects hold object references to authorize with this rule. + // This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. + // Thus newer clients that do not need to support backwards compatibility should send + // only fully qualified Subjects and should omit the UserNames and GroupNames fields. + // Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames. + repeated k8s.io.api.core.v1.ObjectReference subjects = 4; + + // RoleRef can only reference the current namespace and the global namespace. + // If the ClusterRoleRef cannot be resolved, the Authorizer must return an error. + // Since Policy is a singleton, this is sufficient knowledge to locate a role. + optional k8s.io.api.core.v1.ObjectReference roleRef = 5; +} + +// ClusterRoleBindingList is a collection of ClusterRoleBindings +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ClusterRoleBindingList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of ClusterRoleBindings + repeated ClusterRoleBinding items = 2; +} + +// ClusterRoleList is a collection of ClusterRoles +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ClusterRoleList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of ClusterRoles + repeated ClusterRole items = 2; +} + +// GroupRestriction matches a group either by a string match on the group name +// or a label selector applied to group labels. +message GroupRestriction { + // Groups is a list of groups used to match against an individual user's + // groups. If the user is a member of one of the whitelisted groups, the user + // is allowed to be bound to a role. + // +nullable + repeated string groups = 1; + + // Selectors specifies a list of label selectors over group labels. + // +nullable + repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labels = 2; +} + +// IsPersonalSubjectAccessReview is a marker for PolicyRule.AttributeRestrictions that denotes that subjectaccessreviews on self should be allowed +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message IsPersonalSubjectAccessReview { +} + +// LocalResourceAccessReview is a means to request a list of which users and groups are authorized to perform the action specified by spec in a particular namespace +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message LocalResourceAccessReview { + // Action describes the action being tested. The Namespace element is FORCED to the current namespace. 
+ optional Action Action = 1; +} + +// LocalSubjectAccessReview is an object for requesting information about whether a user or group can perform an action in a particular namespace +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message LocalSubjectAccessReview { + // Action describes the action being tested. The Namespace element is FORCED to the current namespace. + optional Action Action = 1; + + // User is optional. If both User and Groups are empty, the current authenticated user is used. + optional string user = 2; + + // Groups is optional. Groups is the list of groups to which the User belongs. + // +k8s:conversion-gen=false + repeated string groups = 3; + + // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // Nil for a self-SAR, means "use the scopes on this request". + // Nil for a regular SAR, means the same as empty. + // +k8s:conversion-gen=false + optional OptionalScopes scopes = 4; +} + +// NamedClusterRole relates a name with a cluster role +message NamedClusterRole { + // Name is the name of the cluster role + optional string name = 1; + + // Role is the cluster role being named + optional ClusterRole role = 2; +} + +// NamedClusterRoleBinding relates a name with a cluster role binding +message NamedClusterRoleBinding { + // Name is the name of the cluster role binding + optional string name = 1; + + // RoleBinding is the cluster role binding being named + optional ClusterRoleBinding roleBinding = 2; +} + +// NamedRole relates a Role with a name +message NamedRole { + // Name is the name of the role + optional string name = 1; + + // Role is the role being named + optional Role role = 2; +} + +// NamedRoleBinding relates a role binding with a name +message NamedRoleBinding { + // Name is the name of the role binding + optional string name = 1; + + // RoleBinding is the role binding being named + optional RoleBinding roleBinding = 2; +} + +// OptionalNames is an array that may also be left nil to distinguish between set and unset. +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message OptionalNames { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// OptionalScopes is an array that may also be left nil to distinguish between set and unset. +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message OptionalScopes { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. +message PolicyRule { + // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + repeated string verbs = 1; + + // AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. + // If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error. + // +kubebuilder:pruning:PreserveUnknownFields + optional k8s.io.apimachinery.pkg.runtime.RawExtension attributeRestrictions = 2; + + // APIGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. 
+ // That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request + // will be allowed + // +optional + // +nullable + repeated string apiGroups = 3; + + // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + repeated string resources = 4; + + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + repeated string resourceNames = 5; + + // NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + // This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. + repeated string nonResourceURLs = 6; +} + +// ResourceAccessReview is a means to request a list of which users and groups are authorized to perform the +// action specified by spec +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ResourceAccessReview { + // Action describes the action being tested. + optional Action Action = 1; +} + +// ResourceAccessReviewResponse describes who can perform the action +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ResourceAccessReviewResponse { + // Namespace is the namespace used for the access review + optional string namespace = 1; + + // UsersSlice is the list of users who can perform the action + // +k8s:conversion-gen=false + repeated string users = 2; + + // GroupsSlice is the list of groups who can perform the action + // +k8s:conversion-gen=false + repeated string groups = 3; + + // EvaluationError is an indication that some error occurred during resolution, but partial results can still be returned. + // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. This is + // most common when a bound role is missing, but enough roles are still present and bound to reason about the request. + optional string evalutionError = 4; +} + +// Role is a logical grouping of PolicyRules that can be referenced as a unit by RoleBindings. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message Role { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Rules holds all the PolicyRules for this Role + repeated PolicyRule rules = 2; +} + +// RoleBinding references a Role, but not contain it. It can reference any Role in the same namespace or in the global namespace. +// It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in. +// RoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces). +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
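
As an aside before the RoleBinding message: the PolicyRule shape defined above can be instantiated like the following hedged Go sketch (verb and resource values are illustrative, not taken from this patch):

    package main

    import (
    	"fmt"

    	authorizationv1 "github.com/openshift/api/authorization/v1"
    )

    func main() {
    	// Illustrative only: a PolicyRule granting read access to pods.
    	// The empty APIGroup entry denotes the core (kubernetes) API group.
    	rule := authorizationv1.PolicyRule{
    		Verbs:     []string{"get", "list"},
    		APIGroups: []string{""},
    		Resources: []string{"pods"},
    	}
    	fmt.Printf("%+v\n", rule)
    }
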
+// +openshift:compatibility-gen:level=1 +message RoleBinding { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // UserNames holds all the usernames directly bound to the role. + // This field should only be specified when supporting legacy clients and servers. + // See Subjects for further details. + // +k8s:conversion-gen=false + // +optional + optional OptionalNames userNames = 2; + + // GroupNames holds all the groups directly bound to the role. + // This field should only be specified when supporting legacy clients and servers. + // See Subjects for further details. + // +k8s:conversion-gen=false + // +optional + optional OptionalNames groupNames = 3; + + // Subjects hold object references to authorize with this rule. + // This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. + // Thus newer clients that do not need to support backwards compatibility should send + // only fully qualified Subjects and should omit the UserNames and GroupNames fields. + // Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames. + repeated k8s.io.api.core.v1.ObjectReference subjects = 4; + + // RoleRef can only reference the current namespace and the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + // Since Policy is a singleton, this is sufficient knowledge to locate a role. + optional k8s.io.api.core.v1.ObjectReference roleRef = 5; +} + +// RoleBindingList is a collection of RoleBindings +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message RoleBindingList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of RoleBindings + repeated RoleBinding items = 2; +} + +// RoleBindingRestriction is an object that can be matched against a subject +// (user, group, or service account) to determine whether rolebindings on that +// subject are allowed in the namespace to which the RoleBindingRestriction +// belongs. If any one of those RoleBindingRestriction objects matches +// a subject, rolebindings on that subject in the namespace are allowed. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message RoleBindingRestriction { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the matcher. + optional RoleBindingRestrictionSpec spec = 2; +} + +// RoleBindingRestrictionList is a collection of RoleBindingRestriction objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message RoleBindingRestrictionList { + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of RoleBindingRestriction objects. + repeated RoleBindingRestriction items = 2; +} + +// RoleBindingRestrictionSpec defines a rolebinding restriction. Exactly one +// field must be non-nil. +message RoleBindingRestrictionSpec { + // UserRestriction matches against user subjects. + // +nullable + optional UserRestriction userrestriction = 1; + + // GroupRestriction matches against group subjects. + // +nullable + optional GroupRestriction grouprestriction = 2; + + // ServiceAccountRestriction matches against service-account subjects. + // +nullable + optional ServiceAccountRestriction serviceaccountrestriction = 3; +} + +// RoleList is a collection of Roles +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message RoleList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of Roles + repeated Role items = 2; +} + +// SelfSubjectRulesReview is a resource you can create to determine which actions you can perform in a namespace +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message SelfSubjectRulesReview { + // Spec adds information about how to conduct the check + optional SelfSubjectRulesReviewSpec spec = 1; + + // Status is completed by the server to tell which permissions you have + optional SubjectRulesReviewStatus status = 2; +} + +// SelfSubjectRulesReviewSpec adds information about how to conduct the check +message SelfSubjectRulesReviewSpec { + // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // Nil means "use the scopes on this request". + // +k8s:conversion-gen=false + optional OptionalScopes scopes = 1; +} + +// ServiceAccountReference specifies a service account and namespace by their +// names. +message ServiceAccountReference { + // Name is the name of the service account. + optional string name = 1; + + // Namespace is the namespace of the service account. Service accounts from + // inside the whitelisted namespaces are allowed to be bound to roles. If + // Namespace is empty, then the namespace of the RoleBindingRestriction in + // which the ServiceAccountReference is embedded is used. + optional string namespace = 2; +} + +// ServiceAccountRestriction matches a service account by a string match on +// either the service-account name or the name of the service account's +// namespace. +message ServiceAccountRestriction { + // ServiceAccounts specifies a list of literal service-account names. + repeated ServiceAccountReference serviceaccounts = 1; + + // Namespaces specifies a list of literal namespace names. + repeated string namespaces = 2; +} + +// SubjectAccessReview is an object for requesting information about whether a user or group can perform an action +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=1 +message SubjectAccessReview { + // Action describes the action being tested. + optional Action Action = 1; + + // User is optional. If both User and Groups are empty, the current authenticated user is used. + optional string user = 2; + + // GroupsSlice is optional. Groups is the list of groups to which the User belongs. + // +k8s:conversion-gen=false + repeated string groups = 3; + + // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // Nil for a self-SAR, means "use the scopes on this request". + // Nil for a regular SAR, means the same as empty. + // +k8s:conversion-gen=false + optional OptionalScopes scopes = 4; +} + +// SubjectAccessReviewResponse describes whether or not a user or group can perform an action +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message SubjectAccessReviewResponse { + // Namespace is the namespace used for the access review + optional string namespace = 1; + + // Allowed is required. True if the action would be allowed, false otherwise. + optional bool allowed = 2; + + // Reason is optional. It indicates why a request was allowed or denied. + optional string reason = 3; + + // EvaluationError is an indication that some error occurred during the authorization check. + // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. This is + // most common when a bound role is missing, but enough roles are still present and bound to reason about the request. + optional string evaluationError = 4; +} + +// SubjectRulesReview is a resource you can create to determine which actions another user can perform in a namespace +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message SubjectRulesReview { + // Spec adds information about how to conduct the check + optional SubjectRulesReviewSpec spec = 1; + + // Status is completed by the server to tell which permissions you have + optional SubjectRulesReviewStatus status = 2; +} + +// SubjectRulesReviewSpec adds information about how to conduct the check +message SubjectRulesReviewSpec { + // User is optional. At least one of User and Groups must be specified. + optional string user = 1; + + // Groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified. + repeated string groups = 2; + + // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + optional OptionalScopes scopes = 3; +} + +// SubjectRulesReviewStatus is contains the result of a rules check +message SubjectRulesReviewStatus { + // Rules is the list of rules (no particular sort) that are allowed for the subject + repeated PolicyRule rules = 1; + + // EvaluationError can appear in combination with Rules. It means some error happened during evaluation + // that may have prevented additional rules from being populated. + optional string evaluationError = 2; +} + +// UserRestriction matches a user either by a string match on the user name, +// a string match on the name of a group to which the user belongs, or a label +// selector applied to the user labels. +message UserRestriction { + // Users specifies a list of literal user names. 
+ repeated string users = 1; + + // Groups specifies a list of literal group names. + // +nullable + repeated string groups = 2; + + // Selectors specifies a list of label selectors over user labels. + // +nullable + repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labels = 3; +} + diff --git a/vendor/github.com/openshift/api/authorization/v1/legacy.go b/vendor/github.com/openshift/api/authorization/v1/legacy.go new file mode 100644 index 000000000..f437a242e --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/legacy.go @@ -0,0 +1,43 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme, rbacv1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &Role{}, + &RoleBinding{}, + &RoleBindingList{}, + &RoleList{}, + + &SelfSubjectRulesReview{}, + &SubjectRulesReview{}, + &ResourceAccessReview{}, + &SubjectAccessReview{}, + &LocalResourceAccessReview{}, + &LocalSubjectAccessReview{}, + &ResourceAccessReviewResponse{}, + &SubjectAccessReviewResponse{}, + &IsPersonalSubjectAccessReview{}, + + &ClusterRole{}, + &ClusterRoleBinding{}, + &ClusterRoleBindingList{}, + &ClusterRoleList{}, + + &RoleBindingRestriction{}, + &RoleBindingRestrictionList{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) + return nil +} diff --git a/vendor/github.com/openshift/api/authorization/v1/register.go b/vendor/github.com/openshift/api/authorization/v1/register.go new file mode 100644 index 000000000..f1e12477b --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/register.go @@ -0,0 +1,60 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "authorization.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme, rbacv1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. 
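
Before the addKnownTypes implementation that follows, a hedged usage sketch of the Install helper declared above; the consuming program here is hypothetical:

    package main

    import (
    	"fmt"

    	authorizationv1 "github.com/openshift/api/authorization/v1"
    	"k8s.io/apimachinery/pkg/runtime"
    )

    func main() {
    	scheme := runtime.NewScheme()
    	if err := authorizationv1.Install(scheme); err != nil {
    		panic(err)
    	}
    	// The scheme now recognizes the authorization.openshift.io/v1 kinds
    	// registered by addKnownTypes below.
    	gvk := authorizationv1.GroupVersion.WithKind("ClusterRole")
    	fmt.Println(scheme.Recognizes(gvk)) // true
    }
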
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &Role{}, + &RoleBinding{}, + &RoleBindingList{}, + &RoleList{}, + + &SelfSubjectRulesReview{}, + &SubjectRulesReview{}, + &ResourceAccessReview{}, + &SubjectAccessReview{}, + &LocalResourceAccessReview{}, + &LocalSubjectAccessReview{}, + &ResourceAccessReviewResponse{}, + &SubjectAccessReviewResponse{}, + &IsPersonalSubjectAccessReview{}, + + &ClusterRole{}, + &ClusterRoleBinding{}, + &ClusterRoleBindingList{}, + &ClusterRoleList{}, + + &RoleBindingRestriction{}, + &RoleBindingRestrictionList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/authorization/v1/stable.rolebindingrestriction.testsuite.yaml b/vendor/github.com/openshift/api/authorization/v1/stable.rolebindingrestriction.testsuite.yaml new file mode 100644 index 000000000..2b8211574 --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/stable.rolebindingrestriction.testsuite.yaml @@ -0,0 +1,14 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] Authorization" +crd: 0000_03_authorization-openshift_01_rolebindingrestriction.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal RoleBindingRestriction + initial: | + apiVersion: authorization.openshift.io/v1 + kind: RoleBindingRestriction + spec: {} # No spec is required for a RoleBindingRestriction + expected: | + apiVersion: authorization.openshift.io/v1 + kind: RoleBindingRestriction + spec: {} diff --git a/vendor/github.com/openshift/api/authorization/v1/types.go b/vendor/github.com/openshift/api/authorization/v1/types.go new file mode 100644 index 000000000..da229b3a8 --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/types.go @@ -0,0 +1,632 @@ +package v1 + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kruntime "k8s.io/apimachinery/pkg/runtime" +) + +// Authorization is calculated against +// 1. all deny RoleBinding PolicyRules in the master namespace - short circuit on match +// 2. all allow RoleBinding PolicyRules in the master namespace - short circuit on match +// 3. all deny RoleBinding PolicyRules in the namespace - short circuit on match +// 4. all allow RoleBinding PolicyRules in the namespace - short circuit on match +// 5. deny by default + +const ( + // GroupKind is string representation of kind used in role binding subjects that represents the "group". + GroupKind = "Group" + // UserKind is string representation of kind used in role binding subjects that represents the "user". + UserKind = "User" + + ScopesKey = "scopes.authorization.openshift.io" +) + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. +type PolicyRule struct { + // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + // AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. + // If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error. 
+ // +kubebuilder:pruning:PreserveUnknownFields + AttributeRestrictions kruntime.RawExtension `json:"attributeRestrictions,omitempty" protobuf:"bytes,2,opt,name=attributeRestrictions"` + // APIGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. + // That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request + // will be allowed + // +optional + // +nullable + APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,3,rep,name=apiGroups"` + // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + Resources []string `json:"resources" protobuf:"bytes,4,rep,name=resources"` + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,5,rep,name=resourceNames"` + // NonResourceURLsSlice is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + // This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. + NonResourceURLsSlice []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,6,rep,name=nonResourceURLs"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// IsPersonalSubjectAccessReview is a marker for PolicyRule.AttributeRestrictions that denotes that subjectaccessreviews on self should be allowed +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type IsPersonalSubjectAccessReview struct { + metav1.TypeMeta `json:",inline"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Role is a logical grouping of PolicyRules that can be referenced as a unit by RoleBindings. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Role struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Rules holds all the PolicyRules for this Role + Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` +} + +// OptionalNames is an array that may also be left nil to distinguish between set and unset. +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type OptionalNames []string + +func (t OptionalNames) String() string { + return fmt.Sprintf("%v", []string(t)) +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RoleBinding references a Role, but not contain it. It can reference any Role in the same namespace or in the global namespace. +// It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in. +// RoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces). 
+// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type RoleBinding struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // UserNames holds all the usernames directly bound to the role. + // This field should only be specified when supporting legacy clients and servers. + // See Subjects for further details. + // +k8s:conversion-gen=false + // +optional + UserNames OptionalNames `json:"userNames" protobuf:"bytes,2,rep,name=userNames"` + // GroupNames holds all the groups directly bound to the role. + // This field should only be specified when supporting legacy clients and servers. + // See Subjects for further details. + // +k8s:conversion-gen=false + // +optional + GroupNames OptionalNames `json:"groupNames" protobuf:"bytes,3,rep,name=groupNames"` + // Subjects hold object references to authorize with this rule. + // This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. + // Thus newer clients that do not need to support backwards compatibility should send + // only fully qualified Subjects and should omit the UserNames and GroupNames fields. + // Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames. + Subjects []corev1.ObjectReference `json:"subjects" protobuf:"bytes,4,rep,name=subjects"` + + // RoleRef can only reference the current namespace and the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + // Since Policy is a singleton, this is sufficient knowledge to locate a role. + RoleRef corev1.ObjectReference `json:"roleRef" protobuf:"bytes,5,opt,name=roleRef"` +} + +// NamedRole relates a Role with a name +type NamedRole struct { + // Name is the name of the role + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Role is the role being named + Role Role `json:"role" protobuf:"bytes,2,opt,name=role"` +} + +// NamedRoleBinding relates a role binding with a name +type NamedRoleBinding struct { + // Name is the name of the role binding + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // RoleBinding is the role binding being named + RoleBinding RoleBinding `json:"roleBinding" protobuf:"bytes,2,opt,name=roleBinding"` +} + +// +genclient +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SelfSubjectRulesReview is a resource you can create to determine which actions you can perform in a namespace +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=1 +type SelfSubjectRulesReview struct { + metav1.TypeMeta `json:",inline"` + + // Spec adds information about how to conduct the check + Spec SelfSubjectRulesReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"` + + // Status is completed by the server to tell which permissions you have + Status SubjectRulesReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` +} + +// SelfSubjectRulesReviewSpec adds information about how to conduct the check +type SelfSubjectRulesReviewSpec struct { + // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // Nil means "use the scopes on this request". + // +k8s:conversion-gen=false + Scopes OptionalScopes `json:"scopes" protobuf:"bytes,1,rep,name=scopes"` +} + +// +genclient +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SubjectRulesReview is a resource you can create to determine which actions another user can perform in a namespace +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type SubjectRulesReview struct { + metav1.TypeMeta `json:",inline"` + + // Spec adds information about how to conduct the check + Spec SubjectRulesReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"` + + // Status is completed by the server to tell which permissions you have + Status SubjectRulesReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` +} + +// SubjectRulesReviewSpec adds information about how to conduct the check +type SubjectRulesReviewSpec struct { + // User is optional. At least one of User and Groups must be specified. + User string `json:"user" protobuf:"bytes,1,opt,name=user"` + // Groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified. + Groups []string `json:"groups" protobuf:"bytes,2,rep,name=groups"` + // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + Scopes OptionalScopes `json:"scopes" protobuf:"bytes,3,opt,name=scopes"` +} + +// SubjectRulesReviewStatus is contains the result of a rules check +type SubjectRulesReviewStatus struct { + // Rules is the list of rules (no particular sort) that are allowed for the subject + Rules []PolicyRule `json:"rules" protobuf:"bytes,1,rep,name=rules"` + // EvaluationError can appear in combination with Rules. It means some error happened during evaluation + // that may have prevented additional rules from being populated. + EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,2,opt,name=evaluationError"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourceAccessReviewResponse describes who can perform the action +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=1 +type ResourceAccessReviewResponse struct { + metav1.TypeMeta `json:",inline"` + + // Namespace is the namespace used for the access review + Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` + // UsersSlice is the list of users who can perform the action + // +k8s:conversion-gen=false + UsersSlice []string `json:"users" protobuf:"bytes,2,rep,name=users"` + // GroupsSlice is the list of groups who can perform the action + // +k8s:conversion-gen=false + GroupsSlice []string `json:"groups" protobuf:"bytes,3,rep,name=groups"` + + // EvaluationError is an indication that some error occurred during resolution, but partial results can still be returned. + // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. This is + // most common when a bound role is missing, but enough roles are still present and bound to reason about the request. + EvaluationError string `json:"evalutionError" protobuf:"bytes,4,opt,name=evalutionError"` +} + +// +genclient +// +genclient:nonNamespaced +// +genclient:skipVerbs=apply,get,list,create,update,patch,delete,deleteCollection,watch +// +genclient:method=Create,verb=create,result=ResourceAccessReviewResponse +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourceAccessReview is a means to request a list of which users and groups are authorized to perform the +// action specified by spec +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ResourceAccessReview struct { + metav1.TypeMeta `json:",inline"` + + // Action describes the action being tested. + Action `json:",inline" protobuf:"bytes,1,opt,name=Action"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SubjectAccessReviewResponse describes whether or not a user or group can perform an action +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type SubjectAccessReviewResponse struct { + metav1.TypeMeta `json:",inline"` + + // Namespace is the namespace used for the access review + Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` + // Allowed is required. True if the action would be allowed, false otherwise. + Allowed bool `json:"allowed" protobuf:"varint,2,opt,name=allowed"` + // Reason is optional. It indicates why a request was allowed or denied. + Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` + // EvaluationError is an indication that some error occurred during the authorization check. + // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. This is + // most common when a bound role is missing, but enough roles are still present and bound to reason about the request. + EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,4,opt,name=evaluationError"` +} + +// OptionalScopes is an array that may also be left nil to distinguish between set and unset. 
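
That set-versus-unset distinction rests on ordinary Go JSON semantics: a nil slice serializes as null, while an empty slice serializes as []. A small self-contained illustration (the stand-in struct and field name are hypothetical):

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // review is a stand-in type; only the nil-vs-empty behavior matters here.
    type review struct {
    	Scopes []string `json:"scopes"`
    }

    func main() {
    	unset, _ := json.Marshal(review{Scopes: nil})
    	empty, _ := json.Marshal(review{Scopes: []string{}})
    	fmt.Println(string(unset)) // {"scopes":null} -> "unset"
    	fmt.Println(string(empty)) // {"scopes":[]}   -> "set, but empty"
    }
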
+// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type OptionalScopes []string + +func (t OptionalScopes) String() string { + return fmt.Sprintf("%v", []string(t)) +} + +// +genclient +// +genclient:nonNamespaced +// +genclient:skipVerbs=apply,get,list,create,update,patch,delete,deleteCollection,watch +// +genclient:method=Create,verb=create,result=SubjectAccessReviewResponse +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SubjectAccessReview is an object for requesting information about whether a user or group can perform an action +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type SubjectAccessReview struct { + metav1.TypeMeta `json:",inline"` + + // Action describes the action being tested. + Action `json:",inline" protobuf:"bytes,1,opt,name=Action"` + // User is optional. If both User and Groups are empty, the current authenticated user is used. + User string `json:"user" protobuf:"bytes,2,opt,name=user"` + // GroupsSlice is optional. Groups is the list of groups to which the User belongs. + // +k8s:conversion-gen=false + GroupsSlice []string `json:"groups" protobuf:"bytes,3,rep,name=groups"` + // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // Nil for a self-SAR, means "use the scopes on this request". + // Nil for a regular SAR, means the same as empty. + // +k8s:conversion-gen=false + Scopes OptionalScopes `json:"scopes" protobuf:"bytes,4,rep,name=scopes"` +} + +// +genclient +// +genclient:skipVerbs=apply,get,list,create,update,patch,delete,deleteCollection,watch +// +genclient:method=Create,verb=create,result=ResourceAccessReviewResponse +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LocalResourceAccessReview is a means to request a list of which users and groups are authorized to perform the action specified by spec in a particular namespace +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type LocalResourceAccessReview struct { + metav1.TypeMeta `json:",inline"` + + // Action describes the action being tested. The Namespace element is FORCED to the current namespace. + Action `json:",inline" protobuf:"bytes,1,opt,name=Action"` +} + +// +genclient +// +genclient:skipVerbs=apply,get,list,create,update,patch,delete,deleteCollection,watch +// +genclient:method=Create,verb=create,result=SubjectAccessReviewResponse +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LocalSubjectAccessReview is an object for requesting information about whether a user or group can perform an action in a particular namespace +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type LocalSubjectAccessReview struct { + metav1.TypeMeta `json:",inline"` + + // Action describes the action being tested. The Namespace element is FORCED to the current namespace. + Action `json:",inline" protobuf:"bytes,1,opt,name=Action"` + // User is optional. If both User and Groups are empty, the current authenticated user is used. + User string `json:"user" protobuf:"bytes,2,opt,name=user"` + // Groups is optional. Groups is the list of groups to which the User belongs. 
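
For reference, the SubjectAccessReview type defined above can be populated as in this hedged sketch; the user, group, and namespace values are illustrative:

    package main

    import (
    	"fmt"

    	authorizationv1 "github.com/openshift/api/authorization/v1"
    )

    func main() {
    	sar := authorizationv1.SubjectAccessReview{
    		Action: authorizationv1.Action{
    			Namespace: "demo", // illustrative namespace
    			Verb:      "list",
    			Resource:  "pods",
    		},
    		User:        "alice",              // illustrative user
    		GroupsSlice: []string{"dev-team"}, // illustrative group
    		// nil Scopes: a self-SAR keeps the request's scopes;
    		// a regular SAR treats nil the same as empty.
    		Scopes: nil,
    	}
    	fmt.Printf("%+v\n", sar)
    }
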
+ // +k8s:conversion-gen=false + GroupsSlice []string `json:"groups" protobuf:"bytes,3,rep,name=groups"` + // Scopes to use for the evaluation. Empty means "use the unscoped (full) permissions of the user/groups". + // Nil for a self-SAR, means "use the scopes on this request". + // Nil for a regular SAR, means the same as empty. + // +k8s:conversion-gen=false + Scopes OptionalScopes `json:"scopes" protobuf:"bytes,4,rep,name=scopes"` +} + +// Action describes a request to the API server +type Action struct { + // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + // Verb is one of: get, list, watch, create, update, delete + Verb string `json:"verb" protobuf:"bytes,2,opt,name=verb"` + // Group is the API group of the resource + // Serialized as resourceAPIGroup to avoid confusion with the 'groups' field when inlined + Group string `json:"resourceAPIGroup" protobuf:"bytes,3,opt,name=resourceAPIGroup"` + // Version is the API version of the resource + // Serialized as resourceAPIVersion to avoid confusion with TypeMeta.apiVersion and ObjectMeta.resourceVersion when inlined + Version string `json:"resourceAPIVersion" protobuf:"bytes,4,opt,name=resourceAPIVersion"` + // Resource is one of the existing resource types + Resource string `json:"resource" protobuf:"bytes,5,opt,name=resource"` + // ResourceName is the name of the resource being requested for a "get" or deleted for a "delete" + ResourceName string `json:"resourceName" protobuf:"bytes,6,opt,name=resourceName"` + // Path is the path of a non resource URL + Path string `json:"path" protobuf:"bytes,8,opt,name=path"` + // IsNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy) + IsNonResourceURL bool `json:"isNonResourceURL" protobuf:"varint,9,opt,name=isNonResourceURL"` + // Content is the actual content of the request for create and update + // +kubebuilder:pruning:PreserveUnknownFields + Content kruntime.RawExtension `json:"content,omitempty" protobuf:"bytes,7,opt,name=content"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RoleBindingList is a collection of RoleBindings +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type RoleBindingList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of RoleBindings + Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RoleList is a collection of Roles +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type RoleList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of Roles + Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterRole is a logical grouping of PolicyRules that can be referenced as a unit by ClusterRoleBindings. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ClusterRole struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Rules holds all the PolicyRules for this ClusterRole + Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + AggregationRule *rbacv1.AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference any ClusterRole in the same namespace or in the global namespace. +// It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in. +// ClusterRoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces). +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ClusterRoleBinding struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // UserNames holds all the usernames directly bound to the role. + // This field should only be specified when supporting legacy clients and servers. + // See Subjects for further details. + // +k8s:conversion-gen=false + // +optional + UserNames OptionalNames `json:"userNames" protobuf:"bytes,2,rep,name=userNames"` + // GroupNames holds all the groups directly bound to the role. + // This field should only be specified when supporting legacy clients and servers. + // See Subjects for further details. + // +k8s:conversion-gen=false + // +optional + GroupNames OptionalNames `json:"groupNames" protobuf:"bytes,3,rep,name=groupNames"` + // Subjects hold object references to authorize with this rule. + // This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. + // Thus newer clients that do not need to support backwards compatibility should send + // only fully qualified Subjects and should omit the UserNames and GroupNames fields. 
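
In practice that looks like the following hedged sketch: only Subjects and RoleRef are populated, and UserNames/GroupNames are deliberately omitted (subject and role names are illustrative; UserKind and GroupKind are the constants declared in this package):

    package main

    import (
    	"fmt"

    	authorizationv1 "github.com/openshift/api/authorization/v1"
    	corev1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
    	crb := authorizationv1.ClusterRoleBinding{
    		ObjectMeta: metav1.ObjectMeta{Name: "readers"}, // illustrative name
    		// Fully qualified subjects; legacy UserNames/GroupNames left unset.
    		Subjects: []corev1.ObjectReference{
    			{Kind: authorizationv1.UserKind, Name: "alice"},
    			{Kind: authorizationv1.GroupKind, Name: "dev-team"},
    		},
    		RoleRef: corev1.ObjectReference{Name: "view"}, // illustrative role
    	}
    	fmt.Printf("%+v\n", crb)
    }
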
+ // Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames. + Subjects []corev1.ObjectReference `json:"subjects" protobuf:"bytes,4,rep,name=subjects"` + + // RoleRef can only reference the current namespace and the global namespace. + // If the ClusterRoleRef cannot be resolved, the Authorizer must return an error. + // Since Policy is a singleton, this is sufficient knowledge to locate a role. + RoleRef corev1.ObjectReference `json:"roleRef" protobuf:"bytes,5,opt,name=roleRef"` +} + +// NamedClusterRole relates a name with a cluster role +type NamedClusterRole struct { + // Name is the name of the cluster role + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Role is the cluster role being named + Role ClusterRole `json:"role" protobuf:"bytes,2,opt,name=role"` +} + +// NamedClusterRoleBinding relates a name with a cluster role binding +type NamedClusterRoleBinding struct { + // Name is the name of the cluster role binding + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // RoleBinding is the cluster role binding being named + RoleBinding ClusterRoleBinding `json:"roleBinding" protobuf:"bytes,2,opt,name=roleBinding"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterRoleBindingList is a collection of ClusterRoleBindings +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ClusterRoleBindingList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ClusterRoleBindings + Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterRoleList is a collection of ClusterRoles +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ClusterRoleList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ClusterRoles + Items []ClusterRole `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RoleBindingRestriction is an object that can be matched against a subject +// (user, group, or service account) to determine whether rolebindings on that +// subject are allowed in the namespace to which the RoleBindingRestriction +// belongs. If any one of those RoleBindingRestriction objects matches +// a subject, rolebindings on that subject in the namespace are allowed. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type RoleBindingRestriction struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the matcher. + Spec RoleBindingRestrictionSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +// RoleBindingRestrictionSpec defines a rolebinding restriction. Exactly one +// field must be non-nil. +type RoleBindingRestrictionSpec struct { + // UserRestriction matches against user subjects. + // +nullable + UserRestriction *UserRestriction `json:"userrestriction" protobuf:"bytes,1,opt,name=userrestriction"` + + // GroupRestriction matches against group subjects. + // +nullable + GroupRestriction *GroupRestriction `json:"grouprestriction" protobuf:"bytes,2,opt,name=grouprestriction"` + + // ServiceAccountRestriction matches against service-account subjects. + // +nullable + ServiceAccountRestriction *ServiceAccountRestriction `json:"serviceaccountrestriction" protobuf:"bytes,3,opt,name=serviceaccountrestriction"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RoleBindingRestrictionList is a collection of RoleBindingRestriction objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type RoleBindingRestrictionList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of RoleBindingRestriction objects. + Items []RoleBindingRestriction `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// UserRestriction matches a user either by a string match on the user name, +// a string match on the name of a group to which the user belongs, or a label +// selector applied to the user labels. +type UserRestriction struct { + // Users specifies a list of literal user names. + Users []string `json:"users" protobuf:"bytes,1,rep,name=users"` + + // Groups specifies a list of literal group names. + // +nullable + Groups []string `json:"groups" protobuf:"bytes,2,rep,name=groups"` + + // Selectors specifies a list of label selectors over user labels. + // +nullable + Selectors []metav1.LabelSelector `json:"labels" protobuf:"bytes,3,rep,name=labels"` +} + +// GroupRestriction matches a group either by a string match on the group name +// or a label selector applied to group labels. +type GroupRestriction struct { + // Groups is a list of groups used to match against an individual user's + // groups. If the user is a member of one of the whitelisted groups, the user + // is allowed to be bound to a role. + // +nullable + Groups []string `json:"groups" protobuf:"bytes,1,rep,name=groups"` + + // Selectors specifies a list of label selectors over group labels. + // +nullable + Selectors []metav1.LabelSelector `json:"labels" protobuf:"bytes,2,rep,name=labels"` +} + +// ServiceAccountRestriction matches a service account by a string match on +// either the service-account name or the name of the service account's +// namespace. +type ServiceAccountRestriction struct { + // ServiceAccounts specifies a list of literal service-account names. 
+ ServiceAccounts []ServiceAccountReference `json:"serviceaccounts" protobuf:"bytes,1,rep,name=serviceaccounts"` + + // Namespaces specifies a list of literal namespace names. + Namespaces []string `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"` +} + +// ServiceAccountReference specifies a service account and namespace by their +// names. +type ServiceAccountReference struct { + // Name is the name of the service account. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // Namespace is the namespace of the service account. Service accounts from + // inside the whitelisted namespaces are allowed to be bound to roles. If + // Namespace is empty, then the namespace of the RoleBindingRestriction in + // which the ServiceAccountReference is embedded is used. + Namespace string `json:"namespace" protobuf:"bytes,2,opt,name=namespace"` +} diff --git a/vendor/github.com/openshift/api/authorization/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/authorization/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..1214fc02b --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/zz_generated.deepcopy.go @@ -0,0 +1,994 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Action) DeepCopyInto(out *Action) { + *out = *in + in.Content.DeepCopyInto(&out.Content) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Action. +func (in *Action) DeepCopy() *Action { + if in == nil { + return nil + } + out := new(Action) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRole) DeepCopyInto(out *ClusterRole) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AggregationRule != nil { + in, out := &in.AggregationRule, &out.AggregationRule + *out = new(rbacv1.AggregationRule) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRole. +func (in *ClusterRole) DeepCopy() *ClusterRole { + if in == nil { + return nil + } + out := new(ClusterRole) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRole) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
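+//
+// (Editorial aside, not part of the generated file: deepcopy-gen emits these
+// helpers so callers can mutate API objects without aliasing shared state,
+// for example items returned by an informer lister. A minimal usage sketch;
+// "cached" and "subject" are hypothetical variables:
+//
+//     fresh := cached.DeepCopy() // cached is a *ClusterRoleBinding from a shared cache
+//     fresh.Subjects = append(fresh.Subjects, subject) // safe: cached is left untouched
+//
+// End of aside.)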
+func (in *ClusterRoleBinding) DeepCopyInto(out *ClusterRoleBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.UserNames != nil { + in, out := &in.UserNames, &out.UserNames + *out = make(OptionalNames, len(*in)) + copy(*out, *in) + } + if in.GroupNames != nil { + in, out := &in.GroupNames, &out.GroupNames + *out = make(OptionalNames, len(*in)) + copy(*out, *in) + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]corev1.ObjectReference, len(*in)) + copy(*out, *in) + } + out.RoleRef = in.RoleRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBinding. +func (in *ClusterRoleBinding) DeepCopy() *ClusterRoleBinding { + if in == nil { + return nil + } + out := new(ClusterRoleBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRoleBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBindingList. +func (in *ClusterRoleBindingList) DeepCopy() *ClusterRoleBindingList { + if in == nil { + return nil + } + out := new(ClusterRoleBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRole, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleList. +func (in *ClusterRoleList) DeepCopy() *ClusterRoleList { + if in == nil { + return nil + } + out := new(ClusterRoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GroupRestriction) DeepCopyInto(out *GroupRestriction) { + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]metav1.LabelSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupRestriction. +func (in *GroupRestriction) DeepCopy() *GroupRestriction { + if in == nil { + return nil + } + out := new(GroupRestriction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IsPersonalSubjectAccessReview) DeepCopyInto(out *IsPersonalSubjectAccessReview) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IsPersonalSubjectAccessReview. +func (in *IsPersonalSubjectAccessReview) DeepCopy() *IsPersonalSubjectAccessReview { + if in == nil { + return nil + } + out := new(IsPersonalSubjectAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IsPersonalSubjectAccessReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalResourceAccessReview) DeepCopyInto(out *LocalResourceAccessReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Action.DeepCopyInto(&out.Action) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalResourceAccessReview. +func (in *LocalResourceAccessReview) DeepCopy() *LocalResourceAccessReview { + if in == nil { + return nil + } + out := new(LocalResourceAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LocalResourceAccessReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalSubjectAccessReview) DeepCopyInto(out *LocalSubjectAccessReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Action.DeepCopyInto(&out.Action) + if in.GroupsSlice != nil { + in, out := &in.GroupsSlice, &out.GroupsSlice + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make(OptionalScopes, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalSubjectAccessReview. +func (in *LocalSubjectAccessReview) DeepCopy() *LocalSubjectAccessReview { + if in == nil { + return nil + } + out := new(LocalSubjectAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *LocalSubjectAccessReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedClusterRole) DeepCopyInto(out *NamedClusterRole) { + *out = *in + in.Role.DeepCopyInto(&out.Role) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedClusterRole. +func (in *NamedClusterRole) DeepCopy() *NamedClusterRole { + if in == nil { + return nil + } + out := new(NamedClusterRole) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedClusterRoleBinding) DeepCopyInto(out *NamedClusterRoleBinding) { + *out = *in + in.RoleBinding.DeepCopyInto(&out.RoleBinding) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedClusterRoleBinding. +func (in *NamedClusterRoleBinding) DeepCopy() *NamedClusterRoleBinding { + if in == nil { + return nil + } + out := new(NamedClusterRoleBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedRole) DeepCopyInto(out *NamedRole) { + *out = *in + in.Role.DeepCopyInto(&out.Role) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedRole. +func (in *NamedRole) DeepCopy() *NamedRole { + if in == nil { + return nil + } + out := new(NamedRole) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedRoleBinding) DeepCopyInto(out *NamedRoleBinding) { + *out = *in + in.RoleBinding.DeepCopyInto(&out.RoleBinding) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedRoleBinding. +func (in *NamedRoleBinding) DeepCopy() *NamedRoleBinding { + if in == nil { + return nil + } + out := new(NamedRoleBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in OptionalNames) DeepCopyInto(out *OptionalNames) { + { + in := &in + *out = make(OptionalNames, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalNames. +func (in OptionalNames) DeepCopy() OptionalNames { + if in == nil { + return nil + } + out := new(OptionalNames) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in OptionalScopes) DeepCopyInto(out *OptionalScopes) { + { + in := &in + *out = make(OptionalScopes, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalScopes. +func (in OptionalScopes) DeepCopy() OptionalScopes { + if in == nil { + return nil + } + out := new(OptionalScopes) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyRule) DeepCopyInto(out *PolicyRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.AttributeRestrictions.DeepCopyInto(&out.AttributeRestrictions) + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLsSlice != nil { + in, out := &in.NonResourceURLsSlice, &out.NonResourceURLsSlice + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule. +func (in *PolicyRule) DeepCopy() *PolicyRule { + if in == nil { + return nil + } + out := new(PolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAccessReview) DeepCopyInto(out *ResourceAccessReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Action.DeepCopyInto(&out.Action) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAccessReview. +func (in *ResourceAccessReview) DeepCopy() *ResourceAccessReview { + if in == nil { + return nil + } + out := new(ResourceAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceAccessReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAccessReviewResponse) DeepCopyInto(out *ResourceAccessReviewResponse) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.UsersSlice != nil { + in, out := &in.UsersSlice, &out.UsersSlice + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.GroupsSlice != nil { + in, out := &in.GroupsSlice, &out.GroupsSlice + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAccessReviewResponse. +func (in *ResourceAccessReviewResponse) DeepCopy() *ResourceAccessReviewResponse { + if in == nil { + return nil + } + out := new(ResourceAccessReviewResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceAccessReviewResponse) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Role) DeepCopyInto(out *Role) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Role. 
+func (in *Role) DeepCopy() *Role { + if in == nil { + return nil + } + out := new(Role) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Role) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleBinding) DeepCopyInto(out *RoleBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.UserNames != nil { + in, out := &in.UserNames, &out.UserNames + *out = make(OptionalNames, len(*in)) + copy(*out, *in) + } + if in.GroupNames != nil { + in, out := &in.GroupNames, &out.GroupNames + *out = make(OptionalNames, len(*in)) + copy(*out, *in) + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]corev1.ObjectReference, len(*in)) + copy(*out, *in) + } + out.RoleRef = in.RoleRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBinding. +func (in *RoleBinding) DeepCopy() *RoleBinding { + if in == nil { + return nil + } + out := new(RoleBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RoleBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingList. +func (in *RoleBindingList) DeepCopy() *RoleBindingList { + if in == nil { + return nil + } + out := new(RoleBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleBindingRestriction) DeepCopyInto(out *RoleBindingRestriction) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingRestriction. +func (in *RoleBindingRestriction) DeepCopy() *RoleBindingRestriction { + if in == nil { + return nil + } + out := new(RoleBindingRestriction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleBindingRestriction) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RoleBindingRestrictionList) DeepCopyInto(out *RoleBindingRestrictionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RoleBindingRestriction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingRestrictionList. +func (in *RoleBindingRestrictionList) DeepCopy() *RoleBindingRestrictionList { + if in == nil { + return nil + } + out := new(RoleBindingRestrictionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleBindingRestrictionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleBindingRestrictionSpec) DeepCopyInto(out *RoleBindingRestrictionSpec) { + *out = *in + if in.UserRestriction != nil { + in, out := &in.UserRestriction, &out.UserRestriction + *out = new(UserRestriction) + (*in).DeepCopyInto(*out) + } + if in.GroupRestriction != nil { + in, out := &in.GroupRestriction, &out.GroupRestriction + *out = new(GroupRestriction) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountRestriction != nil { + in, out := &in.ServiceAccountRestriction, &out.ServiceAccountRestriction + *out = new(ServiceAccountRestriction) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingRestrictionSpec. +func (in *RoleBindingRestrictionSpec) DeepCopy() *RoleBindingRestrictionSpec { + if in == nil { + return nil + } + out := new(RoleBindingRestrictionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleList) DeepCopyInto(out *RoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Role, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleList. +func (in *RoleList) DeepCopy() *RoleList { + if in == nil { + return nil + } + out := new(RoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectRulesReview) DeepCopyInto(out *SelfSubjectRulesReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectRulesReview. 
+func (in *SelfSubjectRulesReview) DeepCopy() *SelfSubjectRulesReview { + if in == nil { + return nil + } + out := new(SelfSubjectRulesReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SelfSubjectRulesReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectRulesReviewSpec) DeepCopyInto(out *SelfSubjectRulesReviewSpec) { + *out = *in + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make(OptionalScopes, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectRulesReviewSpec. +func (in *SelfSubjectRulesReviewSpec) DeepCopy() *SelfSubjectRulesReviewSpec { + if in == nil { + return nil + } + out := new(SelfSubjectRulesReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountReference) DeepCopyInto(out *ServiceAccountReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountReference. +func (in *ServiceAccountReference) DeepCopy() *ServiceAccountReference { + if in == nil { + return nil + } + out := new(ServiceAccountReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountRestriction) DeepCopyInto(out *ServiceAccountRestriction) { + *out = *in + if in.ServiceAccounts != nil { + in, out := &in.ServiceAccounts, &out.ServiceAccounts + *out = make([]ServiceAccountReference, len(*in)) + copy(*out, *in) + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountRestriction. +func (in *ServiceAccountRestriction) DeepCopy() *ServiceAccountRestriction { + if in == nil { + return nil + } + out := new(ServiceAccountRestriction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectAccessReview) DeepCopyInto(out *SubjectAccessReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Action.DeepCopyInto(&out.Action) + if in.GroupsSlice != nil { + in, out := &in.GroupsSlice, &out.GroupsSlice + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make(OptionalScopes, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReview. +func (in *SubjectAccessReview) DeepCopy() *SubjectAccessReview { + if in == nil { + return nil + } + out := new(SubjectAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *SubjectAccessReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectAccessReviewResponse) DeepCopyInto(out *SubjectAccessReviewResponse) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReviewResponse. +func (in *SubjectAccessReviewResponse) DeepCopy() *SubjectAccessReviewResponse { + if in == nil { + return nil + } + out := new(SubjectAccessReviewResponse) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SubjectAccessReviewResponse) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectRulesReview) DeepCopyInto(out *SubjectRulesReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectRulesReview. +func (in *SubjectRulesReview) DeepCopy() *SubjectRulesReview { + if in == nil { + return nil + } + out := new(SubjectRulesReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SubjectRulesReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectRulesReviewSpec) DeepCopyInto(out *SubjectRulesReviewSpec) { + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make(OptionalScopes, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectRulesReviewSpec. +func (in *SubjectRulesReviewSpec) DeepCopy() *SubjectRulesReviewSpec { + if in == nil { + return nil + } + out := new(SubjectRulesReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectRulesReviewStatus) DeepCopyInto(out *SubjectRulesReviewStatus) { + *out = *in + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectRulesReviewStatus. +func (in *SubjectRulesReviewStatus) DeepCopy() *SubjectRulesReviewStatus { + if in == nil { + return nil + } + out := new(SubjectRulesReviewStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserRestriction) DeepCopyInto(out *UserRestriction) { + *out = *in + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Selectors != nil { + in, out := &in.Selectors, &out.Selectors + *out = make([]metav1.LabelSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserRestriction. +func (in *UserRestriction) DeepCopy() *UserRestriction { + if in == nil { + return nil + } + out := new(UserRestriction) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/authorization/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/authorization/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..34777dc95 --- /dev/null +++ b/vendor/github.com/openshift/api/authorization/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,364 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Action = map[string]string{ + "": "Action describes a request to the API server", + "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces", + "verb": "Verb is one of: get, list, watch, create, update, delete", + "resourceAPIGroup": "Group is the API group of the resource Serialized as resourceAPIGroup to avoid confusion with the 'groups' field when inlined", + "resourceAPIVersion": "Version is the API version of the resource Serialized as resourceAPIVersion to avoid confusion with TypeMeta.apiVersion and ObjectMeta.resourceVersion when inlined", + "resource": "Resource is one of the existing resource types", + "resourceName": "ResourceName is the name of the resource being requested for a \"get\" or deleted for a \"delete\"", + "path": "Path is the path of a non resource URL", + "isNonResourceURL": "IsNonResourceURL is true if this is a request for a non-resource URL (outside of the resource hierarchy)", + "content": "Content is the actual content of the request for create and update", +} + +func (Action) SwaggerDoc() map[string]string { + return map_Action +} + +var map_ClusterRole = map[string]string{ + "": "ClusterRole is a logical grouping of PolicyRules that can be referenced as a unit by ClusterRoleBindings.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "rules": "Rules holds all the PolicyRules for this ClusterRole",
+ "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.",
+}
+
+func (ClusterRole) SwaggerDoc() map[string]string {
+ return map_ClusterRole
+}
+
+var map_ClusterRoleBinding = map[string]string{
+ "": "ClusterRoleBinding references a ClusterRole, but does not contain it. It can reference any ClusterRole in the same namespace or in the global namespace. It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in. ClusterRoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces).\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "userNames": "UserNames holds all the usernames directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.",
+ "groupNames": "GroupNames holds all the groups directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.",
+ "subjects": "Subjects hold object references to authorize with this rule. This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers. Thus newer clients that do not need to support backwards compatibility should send only fully qualified Subjects and should omit the UserNames and GroupNames fields. Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.",
+ "roleRef": "RoleRef can only reference the current namespace and the global namespace. If the ClusterRoleRef cannot be resolved, the Authorizer must return an error. Since Policy is a singleton, this is sufficient knowledge to locate a role.",
+}
+
+func (ClusterRoleBinding) SwaggerDoc() map[string]string {
+ return map_ClusterRoleBinding
+}
+
+var map_ClusterRoleBindingList = map[string]string{
+ "": "ClusterRoleBindingList is a collection of ClusterRoleBindings\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "items": "Items is a list of ClusterRoleBindings",
+}
+
+func (ClusterRoleBindingList) SwaggerDoc() map[string]string {
+ return map_ClusterRoleBindingList
+}
+
+var map_ClusterRoleList = map[string]string{
+ "": "ClusterRoleList is a collection of ClusterRoles\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard list's metadata.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is a list of ClusterRoles", +} + +func (ClusterRoleList) SwaggerDoc() map[string]string { + return map_ClusterRoleList +} + +var map_GroupRestriction = map[string]string{ + "": "GroupRestriction matches a group either by a string match on the group name or a label selector applied to group labels.", + "groups": "Groups is a list of groups used to match against an individual user's groups. If the user is a member of one of the whitelisted groups, the user is allowed to be bound to a role.", + "labels": "Selectors specifies a list of label selectors over group labels.", +} + +func (GroupRestriction) SwaggerDoc() map[string]string { + return map_GroupRestriction +} + +var map_IsPersonalSubjectAccessReview = map[string]string{ + "": "IsPersonalSubjectAccessReview is a marker for PolicyRule.AttributeRestrictions that denotes that subjectaccessreviews on self should be allowed\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (IsPersonalSubjectAccessReview) SwaggerDoc() map[string]string { + return map_IsPersonalSubjectAccessReview +} + +var map_LocalResourceAccessReview = map[string]string{ + "": "LocalResourceAccessReview is a means to request a list of which users and groups are authorized to perform the action specified by spec in a particular namespace\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (LocalResourceAccessReview) SwaggerDoc() map[string]string { + return map_LocalResourceAccessReview +} + +var map_LocalSubjectAccessReview = map[string]string{ + "": "LocalSubjectAccessReview is an object for requesting information about whether a user or group can perform an action in a particular namespace\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "user": "User is optional. If both User and Groups are empty, the current authenticated user is used.", + "groups": "Groups is optional. Groups is the list of groups to which the User belongs.", + "scopes": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil for a self-SAR, means \"use the scopes on this request\". 
Nil for a regular SAR, means the same as empty.", +} + +func (LocalSubjectAccessReview) SwaggerDoc() map[string]string { + return map_LocalSubjectAccessReview +} + +var map_NamedClusterRole = map[string]string{ + "": "NamedClusterRole relates a name with a cluster role", + "name": "Name is the name of the cluster role", + "role": "Role is the cluster role being named", +} + +func (NamedClusterRole) SwaggerDoc() map[string]string { + return map_NamedClusterRole +} + +var map_NamedClusterRoleBinding = map[string]string{ + "": "NamedClusterRoleBinding relates a name with a cluster role binding", + "name": "Name is the name of the cluster role binding", + "roleBinding": "RoleBinding is the cluster role binding being named", +} + +func (NamedClusterRoleBinding) SwaggerDoc() map[string]string { + return map_NamedClusterRoleBinding +} + +var map_NamedRole = map[string]string{ + "": "NamedRole relates a Role with a name", + "name": "Name is the name of the role", + "role": "Role is the role being named", +} + +func (NamedRole) SwaggerDoc() map[string]string { + return map_NamedRole +} + +var map_NamedRoleBinding = map[string]string{ + "": "NamedRoleBinding relates a role binding with a name", + "name": "Name is the name of the role binding", + "roleBinding": "RoleBinding is the role binding being named", +} + +func (NamedRoleBinding) SwaggerDoc() map[string]string { + return map_NamedRoleBinding +} + +var map_PolicyRule = map[string]string{ + "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", + "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", + "attributeRestrictions": "AttributeRestrictions will vary depending on what the Authorizer/AuthorizationAttributeBuilder pair supports. If the Authorizer does not recognize how to handle the AttributeRestrictions, the Authorizer should report an error.", + "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If this field is empty, then both kubernetes and origin API groups are assumed. That means that if an action is requested against one of the enumerated resources in either the kubernetes or the origin API group, the request will be allowed", + "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.", + "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", + "nonResourceURLs": "NonResourceURLsSlice is a set of partial urls that a user should have access to. 
*s are allowed, but only as the full, final step in the path. This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different.",
+}
+
+func (PolicyRule) SwaggerDoc() map[string]string {
+ return map_PolicyRule
+}
+
+var map_ResourceAccessReview = map[string]string{
+ "": "ResourceAccessReview is a means to request a list of which users and groups are authorized to perform the action specified by spec\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+}
+
+func (ResourceAccessReview) SwaggerDoc() map[string]string {
+ return map_ResourceAccessReview
+}
+
+var map_ResourceAccessReviewResponse = map[string]string{
+ "": "ResourceAccessReviewResponse describes who can perform the action\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "namespace": "Namespace is the namespace used for the access review",
+ "users": "UsersSlice is the list of users who can perform the action",
+ "groups": "GroupsSlice is the list of groups who can perform the action",
+ "evalutionError": "EvaluationError is an indication that some error occurred during resolution, but partial results can still be returned. It is entirely possible to get an error and be able to continue to determine authorization status in spite of it. This is most common when a bound role is missing, but enough roles are still present and bound to reason about the request.",
+}
+
+func (ResourceAccessReviewResponse) SwaggerDoc() map[string]string {
+ return map_ResourceAccessReviewResponse
+}
+
+var map_Role = map[string]string{
+ "": "Role is a logical grouping of PolicyRules that can be referenced as a unit by RoleBindings.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "rules": "Rules holds all the PolicyRules for this Role",
+}
+
+func (Role) SwaggerDoc() map[string]string {
+ return map_Role
+}
+
+var map_RoleBinding = map[string]string{
+ "": "RoleBinding references a Role, but does not contain it. It can reference any Role in the same namespace or in the global namespace. It adds who information via (Users and Groups) OR Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace (excepting the master namespace which has power in all namespaces).\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "userNames": "UserNames holds all the usernames directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.",
+ "groupNames": "GroupNames holds all the groups directly bound to the role. This field should only be specified when supporting legacy clients and servers. See Subjects for further details.",
+ "subjects": "Subjects hold object references to authorize with this rule. This field is ignored if UserNames or GroupNames are specified to support legacy clients and servers.
Thus newer clients that do not need to support backwards compatibility should send only fully qualified Subjects and should omit the UserNames and GroupNames fields. Clients that need to support backwards compatibility can use this field to build the UserNames and GroupNames.", + "roleRef": "RoleRef can only reference the current namespace and the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error. Since Policy is a singleton, this is sufficient knowledge to locate a role.", +} + +func (RoleBinding) SwaggerDoc() map[string]string { + return map_RoleBinding +} + +var map_RoleBindingList = map[string]string{ + "": "RoleBindingList is a collection of RoleBindings\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is a list of RoleBindings", +} + +func (RoleBindingList) SwaggerDoc() map[string]string { + return map_RoleBindingList +} + +var map_RoleBindingRestriction = map[string]string{ + "": "RoleBindingRestriction is an object that can be matched against a subject (user, group, or service account) to determine whether rolebindings on that subject are allowed in the namespace to which the RoleBindingRestriction belongs. If any one of those RoleBindingRestriction objects matches a subject, rolebindings on that subject in the namespace are allowed.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the matcher.", +} + +func (RoleBindingRestriction) SwaggerDoc() map[string]string { + return map_RoleBindingRestriction +} + +var map_RoleBindingRestrictionList = map[string]string{ + "": "RoleBindingRestrictionList is a collection of RoleBindingRestriction objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is a list of RoleBindingRestriction objects.", +} + +func (RoleBindingRestrictionList) SwaggerDoc() map[string]string { + return map_RoleBindingRestrictionList +} + +var map_RoleBindingRestrictionSpec = map[string]string{ + "": "RoleBindingRestrictionSpec defines a rolebinding restriction. Exactly one field must be non-nil.", + "userrestriction": "UserRestriction matches against user subjects.", + "grouprestriction": "GroupRestriction matches against group subjects.", + "serviceaccountrestriction": "ServiceAccountRestriction matches against service-account subjects.", +} + +func (RoleBindingRestrictionSpec) SwaggerDoc() map[string]string { + return map_RoleBindingRestrictionSpec +} + +var map_RoleList = map[string]string{ + "": "RoleList is a collection of Roles\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is a list of Roles", +} + +func (RoleList) SwaggerDoc() map[string]string { + return map_RoleList +} + +var map_SelfSubjectRulesReview = map[string]string{ + "": "SelfSubjectRulesReview is a resource you can create to determine which actions you can perform in a namespace\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "spec": "Spec adds information about how to conduct the check", + "status": "Status is completed by the server to tell which permissions you have", +} + +func (SelfSubjectRulesReview) SwaggerDoc() map[string]string { + return map_SelfSubjectRulesReview +} + +var map_SelfSubjectRulesReviewSpec = map[string]string{ + "": "SelfSubjectRulesReviewSpec adds information about how to conduct the check", + "scopes": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil means \"use the scopes on this request\".", +} + +func (SelfSubjectRulesReviewSpec) SwaggerDoc() map[string]string { + return map_SelfSubjectRulesReviewSpec +} + +var map_ServiceAccountReference = map[string]string{ + "": "ServiceAccountReference specifies a service account and namespace by their names.", + "name": "Name is the name of the service account.", + "namespace": "Namespace is the namespace of the service account. Service accounts from inside the whitelisted namespaces are allowed to be bound to roles. If Namespace is empty, then the namespace of the RoleBindingRestriction in which the ServiceAccountReference is embedded is used.", +} + +func (ServiceAccountReference) SwaggerDoc() map[string]string { + return map_ServiceAccountReference +} + +var map_ServiceAccountRestriction = map[string]string{ + "": "ServiceAccountRestriction matches a service account by a string match on either the service-account name or the name of the service account's namespace.", + "serviceaccounts": "ServiceAccounts specifies a list of literal service-account names.", + "namespaces": "Namespaces specifies a list of literal namespace names.", +} + +func (ServiceAccountRestriction) SwaggerDoc() map[string]string { + return map_ServiceAccountRestriction +} + +var map_SubjectAccessReview = map[string]string{ + "": "SubjectAccessReview is an object for requesting information about whether a user or group can perform an action\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "user": "User is optional. If both User and Groups are empty, the current authenticated user is used.", + "groups": "GroupsSlice is optional. Groups is the list of groups to which the User belongs.", + "scopes": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\". Nil for a self-SAR, means \"use the scopes on this request\". Nil for a regular SAR, means the same as empty.", +} + +func (SubjectAccessReview) SwaggerDoc() map[string]string { + return map_SubjectAccessReview +} + +var map_SubjectAccessReviewResponse = map[string]string{ + "": "SubjectAccessReviewResponse describes whether or not a user or group can perform an action\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "namespace": "Namespace is the namespace used for the access review", + "allowed": "Allowed is required. 
True if the action would be allowed, false otherwise.",
+ "reason": "Reason is optional. It indicates why a request was allowed or denied.",
+ "evaluationError": "EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue to determine authorization status in spite of it. This is most common when a bound role is missing, but enough roles are still present and bound to reason about the request.",
+}
+
+func (SubjectAccessReviewResponse) SwaggerDoc() map[string]string {
+ return map_SubjectAccessReviewResponse
+}
+
+var map_SubjectRulesReview = map[string]string{
+ "": "SubjectRulesReview is a resource you can create to determine which actions another user can perform in a namespace\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+ "spec": "Spec adds information about how to conduct the check",
+ "status": "Status is completed by the server to tell which permissions you have",
+}
+
+func (SubjectRulesReview) SwaggerDoc() map[string]string {
+ return map_SubjectRulesReview
+}
+
+var map_SubjectRulesReviewSpec = map[string]string{
+ "": "SubjectRulesReviewSpec adds information about how to conduct the check",
+ "user": "User is optional. At least one of User and Groups must be specified.",
+ "groups": "Groups is optional. Groups is the list of groups to which the User belongs. At least one of User and Groups must be specified.",
+ "scopes": "Scopes to use for the evaluation. Empty means \"use the unscoped (full) permissions of the user/groups\".",
+}
+
+func (SubjectRulesReviewSpec) SwaggerDoc() map[string]string {
+ return map_SubjectRulesReviewSpec
+}
+
+var map_SubjectRulesReviewStatus = map[string]string{
+ "": "SubjectRulesReviewStatus contains the result of a rules check",
+ "rules": "Rules is the list of rules (no particular sort) that are allowed for the subject",
+ "evaluationError": "EvaluationError can appear in combination with Rules.
It means some error happened during evaluation that may have prevented additional rules from being populated.", +} + +func (SubjectRulesReviewStatus) SwaggerDoc() map[string]string { + return map_SubjectRulesReviewStatus +} + +var map_UserRestriction = map[string]string{ + "": "UserRestriction matches a user either by a string match on the user name, a string match on the name of a group to which the user belongs, or a label selector applied to the user labels.", + "users": "Users specifies a list of literal user names.", + "groups": "Groups specifies a list of literal group names.", + "labels": "Selectors specifies a list of label selectors over user labels.", +} + +func (UserRestriction) SwaggerDoc() map[string]string { + return map_UserRestriction +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/build/OWNERS b/vendor/github.com/openshift/api/build/OWNERS new file mode 100644 index 000000000..c1ece8b21 --- /dev/null +++ b/vendor/github.com/openshift/api/build/OWNERS @@ -0,0 +1,4 @@ +reviewers: + - bparees + - gabemontero + - jim-minter diff --git a/vendor/github.com/openshift/api/build/install.go b/vendor/github.com/openshift/api/build/install.go new file mode 100644 index 000000000..87e2c26b0 --- /dev/null +++ b/vendor/github.com/openshift/api/build/install.go @@ -0,0 +1,26 @@ +package build + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + buildv1 "github.com/openshift/api/build/v1" +) + +const ( + GroupName = "build.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(buildv1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/build/v1/consts.go b/vendor/github.com/openshift/api/build/v1/consts.go new file mode 100644 index 000000000..3310b9e0a --- /dev/null +++ b/vendor/github.com/openshift/api/build/v1/consts.go @@ -0,0 +1,200 @@ +package v1 + +// annotations +const ( + // BuildAnnotation is an annotation that identifies a Pod as being for a Build + BuildAnnotation = "openshift.io/build.name" + + // BuildConfigAnnotation is an annotation that identifies the BuildConfig that a Build was created from + BuildConfigAnnotation = "openshift.io/build-config.name" + + // BuildCloneAnnotation is an annotation whose value is the name of the build this build was cloned from + BuildCloneAnnotation = "openshift.io/build.clone-of" + + // BuildNumberAnnotation is an annotation whose value is the sequential number for this Build + BuildNumberAnnotation = "openshift.io/build.number" + + // BuildPodNameAnnotation is an annotation whose value is the name of the pod running this build + BuildPodNameAnnotation = "openshift.io/build.pod-name" + + // BuildJenkinsStatusJSONAnnotation is an annotation holding the Jenkins status information + BuildJenkinsStatusJSONAnnotation = "openshift.io/jenkins-status-json" + + // BuildJenkinsLogURLAnnotation is an annotation holding a link to the raw Jenkins build console log + BuildJenkinsLogURLAnnotation = "openshift.io/jenkins-log-url" + + // BuildJenkinsConsoleLogURLAnnotation is an annotation holding a link to the Jenkins build console log (including Jenkins chrome wrappering) + BuildJenkinsConsoleLogURLAnnotation = 
"openshift.io/jenkins-console-log-url" + + // BuildJenkinsBlueOceanLogURLAnnotation is an annotation holding a link to the Jenkins build console log via the Jenkins BlueOcean UI Plugin + BuildJenkinsBlueOceanLogURLAnnotation = "openshift.io/jenkins-blueocean-log-url" + + // BuildJenkinsBuildURIAnnotation is an annotation holding a link to the Jenkins build + BuildJenkinsBuildURIAnnotation = "openshift.io/jenkins-build-uri" + + // BuildSourceSecretMatchURIAnnotationPrefix is a prefix for annotations on a Secret which indicate a source URI against which the Secret can be used + BuildSourceSecretMatchURIAnnotationPrefix = "build.openshift.io/source-secret-match-uri-" + + // BuildConfigPausedAnnotation is an annotation that marks a BuildConfig as paused. + // New Builds cannot be instantiated from a paused BuildConfig. + BuildConfigPausedAnnotation = "openshift.io/build-config.paused" +) + +// labels +const ( + // BuildConfigLabel is the key of a Build label whose value is the ID of a BuildConfig + // on which the Build is based. NOTE: The value for this label may not contain the entire + // BuildConfig name because it will be truncated to maximum label length. + BuildConfigLabel = "openshift.io/build-config.name" + + // BuildLabel is the key of a Pod label whose value is the Name of a Build which is run. + // NOTE: The value for this label may not contain the entire Build name because it will be + // truncated to maximum label length. + BuildLabel = "openshift.io/build.name" + + // BuildRunPolicyLabel represents the start policy used to start the build. + BuildRunPolicyLabel = "openshift.io/build.start-policy" + + // BuildConfigLabelDeprecated was used as BuildConfigLabel before adding namespaces. + // We keep it for backward compatibility. + BuildConfigLabelDeprecated = "buildconfig" +) + +const ( + // StatusReasonError is a generic reason for a build error condition. + StatusReasonError StatusReason = "Error" + + // StatusReasonCannotCreateBuildPodSpec is an error condition when the build + // strategy cannot create a build pod spec. + StatusReasonCannotCreateBuildPodSpec StatusReason = "CannotCreateBuildPodSpec" + + // StatusReasonCannotCreateBuildPod is an error condition when a build pod + // cannot be created. + StatusReasonCannotCreateBuildPod StatusReason = "CannotCreateBuildPod" + + // StatusReasonInvalidOutputReference is an error condition when the build + // output is an invalid reference. + StatusReasonInvalidOutputReference StatusReason = "InvalidOutputReference" + + // StatusReasonInvalidImageReference is an error condition when the build + // references an invalid image. + StatusReasonInvalidImageReference StatusReason = "InvalidImageReference" + + // StatusReasonCancelBuildFailed is an error condition when cancelling a build + // fails. + StatusReasonCancelBuildFailed StatusReason = "CancelBuildFailed" + + // StatusReasonBuildPodDeleted is an error condition when the build pod is + // deleted before build completion. + StatusReasonBuildPodDeleted StatusReason = "BuildPodDeleted" + + // StatusReasonExceededRetryTimeout is an error condition when the build has + // not completed and retrying the build times out. + StatusReasonExceededRetryTimeout StatusReason = "ExceededRetryTimeout" + + // StatusReasonMissingPushSecret indicates that the build is missing required + // secret for pushing the output image. + // The build will stay in the pending state until the secret is created, or the build times out. 
+ StatusReasonMissingPushSecret StatusReason = "MissingPushSecret" + + // StatusReasonPostCommitHookFailed indicates the post-commit hook failed. + StatusReasonPostCommitHookFailed StatusReason = "PostCommitHookFailed" + + // StatusReasonPushImageToRegistryFailed indicates that an image failed to be + // pushed to the registry. + StatusReasonPushImageToRegistryFailed StatusReason = "PushImageToRegistryFailed" + + // StatusReasonPullBuilderImageFailed indicates that we failed to pull the + // builder image. + StatusReasonPullBuilderImageFailed StatusReason = "PullBuilderImageFailed" + + // StatusReasonFetchSourceFailed indicates that fetching the source of the + // build has failed. + StatusReasonFetchSourceFailed StatusReason = "FetchSourceFailed" + + // StatusReasonFetchImageContentFailed indicates that the fetching of an image and extracting + // its contents for inclusion in the build has failed. + StatusReasonFetchImageContentFailed StatusReason = "FetchImageContentFailed" + + // StatusReasonManageDockerfileFailed indicates that the set up of the Dockerfile for the build + // has failed. + StatusReasonManageDockerfileFailed StatusReason = "ManageDockerfileFailed" + + // StatusReasonInvalidContextDirectory indicates that the supplied + // contextDir does not exist + StatusReasonInvalidContextDirectory StatusReason = "InvalidContextDirectory" + + // StatusReasonCancelledBuild indicates that the build was cancelled by the + // user. + StatusReasonCancelledBuild StatusReason = "CancelledBuild" + + // StatusReasonDockerBuildFailed indicates that the container image build strategy has + // failed. + StatusReasonDockerBuildFailed StatusReason = "DockerBuildFailed" + + // StatusReasonBuildPodExists indicates that the build tried to create a + // build pod but one was already present. + StatusReasonBuildPodExists StatusReason = "BuildPodExists" + + // StatusReasonNoBuildContainerStatus indicates that the build failed because + // the build pod has no container statuses. + StatusReasonNoBuildContainerStatus StatusReason = "NoBuildContainerStatus" + + // StatusReasonFailedContainer indicates that the pod for the build has at least + // one container with a non-zero exit status. + StatusReasonFailedContainer StatusReason = "FailedContainer" + + // StatusReasonUnresolvableEnvironmentVariable indicates that an error occurred processing + // the supplied options for environment variables in the build strategy environment + StatusReasonUnresolvableEnvironmentVariable StatusReason = "UnresolvableEnvironmentVariable" + + // StatusReasonGenericBuildFailed is the reason associated with a broad + // range of build failures. + StatusReasonGenericBuildFailed StatusReason = "GenericBuildFailed" + + // StatusReasonOutOfMemoryKilled indicates that the build pod was killed for its memory consumption + StatusReasonOutOfMemoryKilled StatusReason = "OutOfMemoryKilled" + + // StatusReasonCannotRetrieveServiceAccount is the reason associated with a failure + // to look up the service account associated with the BuildConfig.
+ StatusReasonCannotRetrieveServiceAccount StatusReason = "CannotRetrieveServiceAccount" + + // StatusReasonBuildPodEvicted is the reason a build fails due to the build pod being evicted + // from its node + StatusReasonBuildPodEvicted StatusReason = "BuildPodEvicted" +) + +// env vars +// WhitelistEnvVarNames is a list of special env vars allowed in s2i containers +var WhitelistEnvVarNames = []string{"BUILD_LOGLEVEL", "GIT_SSL_NO_VERIFY", "HTTP_PROXY", "HTTPS_PROXY", "LANG", "NO_PROXY"} + +// env vars +const ( + + // CustomBuildStrategyBaseImageKey is the environment variable that indicates the base image to be used when + // performing a custom build, if needed. + CustomBuildStrategyBaseImageKey = "OPENSHIFT_CUSTOM_BUILD_BASE_IMAGE" + + // AllowedUIDs is an environment variable that contains ranges of UIDs that are allowed in + // Source builder images + AllowedUIDs = "ALLOWED_UIDS" + // DropCapabilities is an environment variable that contains a list of capabilities to drop when + // executing a Source build + DropCapabilities = "DROP_CAPS" +) + +// keys inside of secrets and configmaps +const ( + // WebHookSecretKey is the key used to identify the value containing the webhook invocation + // secret within a secret referenced by a webhook trigger. + WebHookSecretKey = "WebHookSecretKey" + + // RegistryConfKey is the ConfigMap key for the build pod's registry configuration file. + RegistryConfKey = "registries.conf" + + // SignaturePolicyKey is the ConfigMap key for the build pod's image signature policy file. + SignaturePolicyKey = "policy.json" + + // ServiceCAKey is the ConfigMap key for the service signing certificate authority mounted into build pods. + ServiceCAKey = "service-ca.crt" +) diff --git a/vendor/github.com/openshift/api/build/v1/doc.go b/vendor/github.com/openshift/api/build/v1/doc.go new file mode 100644 index 000000000..9bc16f64b --- /dev/null +++ b/vendor/github.com/openshift/api/build/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/build/apis/build +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=build.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/build/v1/generated.pb.go b/vendor/github.com/openshift/api/build/v1/generated.pb.go new file mode 100644 index 000000000..1b026f354 --- /dev/null +++ b/vendor/github.com/openshift/api/build/v1/generated.pb.go @@ -0,0 +1,17545 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/build/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated.
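+// (Descriptive aside, not part of the generated file: the blank constant below
+// is the usual Go compile-time guard. If the vendored gogo/protobuf runtime
+// predates the generator that produced this file, the identifier
+// GoGoProtoPackageIsVersion3 does not exist and compilation fails here rather
+// than misbehaving at runtime.)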
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *BinaryBuildRequestOptions) Reset() { *m = BinaryBuildRequestOptions{} } +func (*BinaryBuildRequestOptions) ProtoMessage() {} +func (*BinaryBuildRequestOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{0} +} +func (m *BinaryBuildRequestOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BinaryBuildRequestOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BinaryBuildRequestOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_BinaryBuildRequestOptions.Merge(m, src) +} +func (m *BinaryBuildRequestOptions) XXX_Size() int { + return m.Size() +} +func (m *BinaryBuildRequestOptions) XXX_DiscardUnknown() { + xxx_messageInfo_BinaryBuildRequestOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_BinaryBuildRequestOptions proto.InternalMessageInfo + +func (m *BinaryBuildSource) Reset() { *m = BinaryBuildSource{} } +func (*BinaryBuildSource) ProtoMessage() {} +func (*BinaryBuildSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{1} +} +func (m *BinaryBuildSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BinaryBuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BinaryBuildSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_BinaryBuildSource.Merge(m, src) +} +func (m *BinaryBuildSource) XXX_Size() int { + return m.Size() +} +func (m *BinaryBuildSource) XXX_DiscardUnknown() { + xxx_messageInfo_BinaryBuildSource.DiscardUnknown(m) +} + +var xxx_messageInfo_BinaryBuildSource proto.InternalMessageInfo + +func (m *BitbucketWebHookCause) Reset() { *m = BitbucketWebHookCause{} } +func (*BitbucketWebHookCause) ProtoMessage() {} +func (*BitbucketWebHookCause) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{2} +} +func (m *BitbucketWebHookCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BitbucketWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BitbucketWebHookCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_BitbucketWebHookCause.Merge(m, src) +} +func (m *BitbucketWebHookCause) XXX_Size() int { + return m.Size() +} +func (m *BitbucketWebHookCause) XXX_DiscardUnknown() { + xxx_messageInfo_BitbucketWebHookCause.DiscardUnknown(m) +} + +var xxx_messageInfo_BitbucketWebHookCause proto.InternalMessageInfo + +func (m *Build) Reset() { *m = Build{} } +func (*Build) ProtoMessage() {} +func (*Build) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{3} +} +func (m *Build) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Build) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Build) XXX_Merge(src proto.Message) { + xxx_messageInfo_Build.Merge(m, src) +} +func (m *Build) XXX_Size() int { + return m.Size() +} +func (m *Build) XXX_DiscardUnknown() { + xxx_messageInfo_Build.DiscardUnknown(m) +} + +var 
xxx_messageInfo_Build proto.InternalMessageInfo + +func (m *BuildCondition) Reset() { *m = BuildCondition{} } +func (*BuildCondition) ProtoMessage() {} +func (*BuildCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{4} +} +func (m *BuildCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildCondition.Merge(m, src) +} +func (m *BuildCondition) XXX_Size() int { + return m.Size() +} +func (m *BuildCondition) XXX_DiscardUnknown() { + xxx_messageInfo_BuildCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildCondition proto.InternalMessageInfo + +func (m *BuildConfig) Reset() { *m = BuildConfig{} } +func (*BuildConfig) ProtoMessage() {} +func (*BuildConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{5} +} +func (m *BuildConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildConfig.Merge(m, src) +} +func (m *BuildConfig) XXX_Size() int { + return m.Size() +} +func (m *BuildConfig) XXX_DiscardUnknown() { + xxx_messageInfo_BuildConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildConfig proto.InternalMessageInfo + +func (m *BuildConfigList) Reset() { *m = BuildConfigList{} } +func (*BuildConfigList) ProtoMessage() {} +func (*BuildConfigList) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{6} +} +func (m *BuildConfigList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildConfigList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildConfigList) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildConfigList.Merge(m, src) +} +func (m *BuildConfigList) XXX_Size() int { + return m.Size() +} +func (m *BuildConfigList) XXX_DiscardUnknown() { + xxx_messageInfo_BuildConfigList.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildConfigList proto.InternalMessageInfo + +func (m *BuildConfigSpec) Reset() { *m = BuildConfigSpec{} } +func (*BuildConfigSpec) ProtoMessage() {} +func (*BuildConfigSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{7} +} +func (m *BuildConfigSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildConfigSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildConfigSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildConfigSpec.Merge(m, src) +} +func (m *BuildConfigSpec) XXX_Size() int { + return m.Size() +} +func (m *BuildConfigSpec) XXX_DiscardUnknown() { + xxx_messageInfo_BuildConfigSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildConfigSpec proto.InternalMessageInfo + +func (m *BuildConfigStatus) Reset() { *m = BuildConfigStatus{} } +func (*BuildConfigStatus) ProtoMessage() {} +func (*BuildConfigStatus) Descriptor() 
([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{8} +} +func (m *BuildConfigStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildConfigStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildConfigStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildConfigStatus.Merge(m, src) +} +func (m *BuildConfigStatus) XXX_Size() int { + return m.Size() +} +func (m *BuildConfigStatus) XXX_DiscardUnknown() { + xxx_messageInfo_BuildConfigStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildConfigStatus proto.InternalMessageInfo + +func (m *BuildList) Reset() { *m = BuildList{} } +func (*BuildList) ProtoMessage() {} +func (*BuildList) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{9} +} +func (m *BuildList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildList) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildList.Merge(m, src) +} +func (m *BuildList) XXX_Size() int { + return m.Size() +} +func (m *BuildList) XXX_DiscardUnknown() { + xxx_messageInfo_BuildList.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildList proto.InternalMessageInfo + +func (m *BuildLog) Reset() { *m = BuildLog{} } +func (*BuildLog) ProtoMessage() {} +func (*BuildLog) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{10} +} +func (m *BuildLog) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildLog) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildLog.Merge(m, src) +} +func (m *BuildLog) XXX_Size() int { + return m.Size() +} +func (m *BuildLog) XXX_DiscardUnknown() { + xxx_messageInfo_BuildLog.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildLog proto.InternalMessageInfo + +func (m *BuildLogOptions) Reset() { *m = BuildLogOptions{} } +func (*BuildLogOptions) ProtoMessage() {} +func (*BuildLogOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{11} +} +func (m *BuildLogOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildLogOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildLogOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildLogOptions.Merge(m, src) +} +func (m *BuildLogOptions) XXX_Size() int { + return m.Size() +} +func (m *BuildLogOptions) XXX_DiscardUnknown() { + xxx_messageInfo_BuildLogOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildLogOptions proto.InternalMessageInfo + +func (m *BuildOutput) Reset() { *m = BuildOutput{} } +func (*BuildOutput) ProtoMessage() {} +func (*BuildOutput) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{12} +} +func (m *BuildOutput) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildOutput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if 
err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildOutput) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildOutput.Merge(m, src) +} +func (m *BuildOutput) XXX_Size() int { + return m.Size() +} +func (m *BuildOutput) XXX_DiscardUnknown() { + xxx_messageInfo_BuildOutput.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildOutput proto.InternalMessageInfo + +func (m *BuildPostCommitSpec) Reset() { *m = BuildPostCommitSpec{} } +func (*BuildPostCommitSpec) ProtoMessage() {} +func (*BuildPostCommitSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{13} +} +func (m *BuildPostCommitSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildPostCommitSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildPostCommitSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildPostCommitSpec.Merge(m, src) +} +func (m *BuildPostCommitSpec) XXX_Size() int { + return m.Size() +} +func (m *BuildPostCommitSpec) XXX_DiscardUnknown() { + xxx_messageInfo_BuildPostCommitSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildPostCommitSpec proto.InternalMessageInfo + +func (m *BuildRequest) Reset() { *m = BuildRequest{} } +func (*BuildRequest) ProtoMessage() {} +func (*BuildRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{14} +} +func (m *BuildRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildRequest.Merge(m, src) +} +func (m *BuildRequest) XXX_Size() int { + return m.Size() +} +func (m *BuildRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BuildRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildRequest proto.InternalMessageInfo + +func (m *BuildSource) Reset() { *m = BuildSource{} } +func (*BuildSource) ProtoMessage() {} +func (*BuildSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{15} +} +func (m *BuildSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildSource.Merge(m, src) +} +func (m *BuildSource) XXX_Size() int { + return m.Size() +} +func (m *BuildSource) XXX_DiscardUnknown() { + xxx_messageInfo_BuildSource.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildSource proto.InternalMessageInfo + +func (m *BuildSpec) Reset() { *m = BuildSpec{} } +func (*BuildSpec) ProtoMessage() {} +func (*BuildSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{16} +} +func (m *BuildSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildSpec.Merge(m, src) +} +func (m *BuildSpec) XXX_Size() int { + return m.Size() +} +func (m *BuildSpec) 
XXX_DiscardUnknown() { + xxx_messageInfo_BuildSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildSpec proto.InternalMessageInfo + +func (m *BuildStatus) Reset() { *m = BuildStatus{} } +func (*BuildStatus) ProtoMessage() {} +func (*BuildStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{17} +} +func (m *BuildStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildStatus.Merge(m, src) +} +func (m *BuildStatus) XXX_Size() int { + return m.Size() +} +func (m *BuildStatus) XXX_DiscardUnknown() { + xxx_messageInfo_BuildStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildStatus proto.InternalMessageInfo + +func (m *BuildStatusOutput) Reset() { *m = BuildStatusOutput{} } +func (*BuildStatusOutput) ProtoMessage() {} +func (*BuildStatusOutput) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{18} +} +func (m *BuildStatusOutput) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildStatusOutput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildStatusOutput) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildStatusOutput.Merge(m, src) +} +func (m *BuildStatusOutput) XXX_Size() int { + return m.Size() +} +func (m *BuildStatusOutput) XXX_DiscardUnknown() { + xxx_messageInfo_BuildStatusOutput.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildStatusOutput proto.InternalMessageInfo + +func (m *BuildStatusOutputTo) Reset() { *m = BuildStatusOutputTo{} } +func (*BuildStatusOutputTo) ProtoMessage() {} +func (*BuildStatusOutputTo) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{19} +} +func (m *BuildStatusOutputTo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildStatusOutputTo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildStatusOutputTo) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildStatusOutputTo.Merge(m, src) +} +func (m *BuildStatusOutputTo) XXX_Size() int { + return m.Size() +} +func (m *BuildStatusOutputTo) XXX_DiscardUnknown() { + xxx_messageInfo_BuildStatusOutputTo.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildStatusOutputTo proto.InternalMessageInfo + +func (m *BuildStrategy) Reset() { *m = BuildStrategy{} } +func (*BuildStrategy) ProtoMessage() {} +func (*BuildStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{20} +} +func (m *BuildStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildStrategy.Merge(m, src) +} +func (m *BuildStrategy) XXX_Size() int { + return m.Size() +} +func (m *BuildStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_BuildStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildStrategy proto.InternalMessageInfo + +func (m 
*BuildTriggerCause) Reset() { *m = BuildTriggerCause{} } +func (*BuildTriggerCause) ProtoMessage() {} +func (*BuildTriggerCause) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{21} +} +func (m *BuildTriggerCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildTriggerCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildTriggerCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildTriggerCause.Merge(m, src) +} +func (m *BuildTriggerCause) XXX_Size() int { + return m.Size() +} +func (m *BuildTriggerCause) XXX_DiscardUnknown() { + xxx_messageInfo_BuildTriggerCause.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildTriggerCause proto.InternalMessageInfo + +func (m *BuildTriggerPolicy) Reset() { *m = BuildTriggerPolicy{} } +func (*BuildTriggerPolicy) ProtoMessage() {} +func (*BuildTriggerPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{22} +} +func (m *BuildTriggerPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildTriggerPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildTriggerPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildTriggerPolicy.Merge(m, src) +} +func (m *BuildTriggerPolicy) XXX_Size() int { + return m.Size() +} +func (m *BuildTriggerPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_BuildTriggerPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildTriggerPolicy proto.InternalMessageInfo + +func (m *BuildVolume) Reset() { *m = BuildVolume{} } +func (*BuildVolume) ProtoMessage() {} +func (*BuildVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{23} +} +func (m *BuildVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildVolume.Merge(m, src) +} +func (m *BuildVolume) XXX_Size() int { + return m.Size() +} +func (m *BuildVolume) XXX_DiscardUnknown() { + xxx_messageInfo_BuildVolume.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildVolume proto.InternalMessageInfo + +func (m *BuildVolumeMount) Reset() { *m = BuildVolumeMount{} } +func (*BuildVolumeMount) ProtoMessage() {} +func (*BuildVolumeMount) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{24} +} +func (m *BuildVolumeMount) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildVolumeMount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildVolumeMount) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildVolumeMount.Merge(m, src) +} +func (m *BuildVolumeMount) XXX_Size() int { + return m.Size() +} +func (m *BuildVolumeMount) XXX_DiscardUnknown() { + xxx_messageInfo_BuildVolumeMount.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildVolumeMount proto.InternalMessageInfo + +func (m *BuildVolumeSource) Reset() { *m = BuildVolumeSource{} } +func (*BuildVolumeSource) ProtoMessage() {} +func 
(*BuildVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{25} +} +func (m *BuildVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BuildVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BuildVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildVolumeSource.Merge(m, src) +} +func (m *BuildVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *BuildVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_BuildVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildVolumeSource proto.InternalMessageInfo + +func (m *CommonSpec) Reset() { *m = CommonSpec{} } +func (*CommonSpec) ProtoMessage() {} +func (*CommonSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{26} +} +func (m *CommonSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CommonSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CommonSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommonSpec.Merge(m, src) +} +func (m *CommonSpec) XXX_Size() int { + return m.Size() +} +func (m *CommonSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CommonSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CommonSpec proto.InternalMessageInfo + +func (m *CommonWebHookCause) Reset() { *m = CommonWebHookCause{} } +func (*CommonWebHookCause) ProtoMessage() {} +func (*CommonWebHookCause) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{27} +} +func (m *CommonWebHookCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CommonWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CommonWebHookCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommonWebHookCause.Merge(m, src) +} +func (m *CommonWebHookCause) XXX_Size() int { + return m.Size() +} +func (m *CommonWebHookCause) XXX_DiscardUnknown() { + xxx_messageInfo_CommonWebHookCause.DiscardUnknown(m) +} + +var xxx_messageInfo_CommonWebHookCause proto.InternalMessageInfo + +func (m *ConfigMapBuildSource) Reset() { *m = ConfigMapBuildSource{} } +func (*ConfigMapBuildSource) ProtoMessage() {} +func (*ConfigMapBuildSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{28} +} +func (m *ConfigMapBuildSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapBuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapBuildSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapBuildSource.Merge(m, src) +} +func (m *ConfigMapBuildSource) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapBuildSource) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapBuildSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigMapBuildSource proto.InternalMessageInfo + +func (m *CustomBuildStrategy) Reset() { *m = CustomBuildStrategy{} } +func (*CustomBuildStrategy) ProtoMessage() {} +func (*CustomBuildStrategy) Descriptor() ([]byte, []int) { + return 
fileDescriptor_2ba579f6f004cb75, []int{29} +} +func (m *CustomBuildStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CustomBuildStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CustomBuildStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_CustomBuildStrategy.Merge(m, src) +} +func (m *CustomBuildStrategy) XXX_Size() int { + return m.Size() +} +func (m *CustomBuildStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_CustomBuildStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_CustomBuildStrategy proto.InternalMessageInfo + +func (m *DockerBuildStrategy) Reset() { *m = DockerBuildStrategy{} } +func (*DockerBuildStrategy) ProtoMessage() {} +func (*DockerBuildStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{30} +} +func (m *DockerBuildStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DockerBuildStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DockerBuildStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DockerBuildStrategy.Merge(m, src) +} +func (m *DockerBuildStrategy) XXX_Size() int { + return m.Size() +} +func (m *DockerBuildStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DockerBuildStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_DockerBuildStrategy proto.InternalMessageInfo + +func (m *DockerStrategyOptions) Reset() { *m = DockerStrategyOptions{} } +func (*DockerStrategyOptions) ProtoMessage() {} +func (*DockerStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{31} +} +func (m *DockerStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DockerStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DockerStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_DockerStrategyOptions.Merge(m, src) +} +func (m *DockerStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *DockerStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_DockerStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_DockerStrategyOptions proto.InternalMessageInfo + +func (m *GenericWebHookCause) Reset() { *m = GenericWebHookCause{} } +func (*GenericWebHookCause) ProtoMessage() {} +func (*GenericWebHookCause) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{32} +} +func (m *GenericWebHookCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenericWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GenericWebHookCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenericWebHookCause.Merge(m, src) +} +func (m *GenericWebHookCause) XXX_Size() int { + return m.Size() +} +func (m *GenericWebHookCause) XXX_DiscardUnknown() { + xxx_messageInfo_GenericWebHookCause.DiscardUnknown(m) +} + +var xxx_messageInfo_GenericWebHookCause proto.InternalMessageInfo + +func (m *GenericWebHookEvent) Reset() { *m = GenericWebHookEvent{} } +func (*GenericWebHookEvent) 
ProtoMessage() {} +func (*GenericWebHookEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{33} +} +func (m *GenericWebHookEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenericWebHookEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GenericWebHookEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenericWebHookEvent.Merge(m, src) +} +func (m *GenericWebHookEvent) XXX_Size() int { + return m.Size() +} +func (m *GenericWebHookEvent) XXX_DiscardUnknown() { + xxx_messageInfo_GenericWebHookEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_GenericWebHookEvent proto.InternalMessageInfo + +func (m *GitBuildSource) Reset() { *m = GitBuildSource{} } +func (*GitBuildSource) ProtoMessage() {} +func (*GitBuildSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{34} +} +func (m *GitBuildSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitBuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GitBuildSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitBuildSource.Merge(m, src) +} +func (m *GitBuildSource) XXX_Size() int { + return m.Size() +} +func (m *GitBuildSource) XXX_DiscardUnknown() { + xxx_messageInfo_GitBuildSource.DiscardUnknown(m) +} + +var xxx_messageInfo_GitBuildSource proto.InternalMessageInfo + +func (m *GitHubWebHookCause) Reset() { *m = GitHubWebHookCause{} } +func (*GitHubWebHookCause) ProtoMessage() {} +func (*GitHubWebHookCause) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{35} +} +func (m *GitHubWebHookCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitHubWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GitHubWebHookCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitHubWebHookCause.Merge(m, src) +} +func (m *GitHubWebHookCause) XXX_Size() int { + return m.Size() +} +func (m *GitHubWebHookCause) XXX_DiscardUnknown() { + xxx_messageInfo_GitHubWebHookCause.DiscardUnknown(m) +} + +var xxx_messageInfo_GitHubWebHookCause proto.InternalMessageInfo + +func (m *GitInfo) Reset() { *m = GitInfo{} } +func (*GitInfo) ProtoMessage() {} +func (*GitInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{36} +} +func (m *GitInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GitInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitInfo.Merge(m, src) +} +func (m *GitInfo) XXX_Size() int { + return m.Size() +} +func (m *GitInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GitInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GitInfo proto.InternalMessageInfo + +func (m *GitLabWebHookCause) Reset() { *m = GitLabWebHookCause{} } +func (*GitLabWebHookCause) ProtoMessage() {} +func (*GitLabWebHookCause) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{37} +} +func (m *GitLabWebHookCause) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitLabWebHookCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GitLabWebHookCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitLabWebHookCause.Merge(m, src) +} +func (m *GitLabWebHookCause) XXX_Size() int { + return m.Size() +} +func (m *GitLabWebHookCause) XXX_DiscardUnknown() { + xxx_messageInfo_GitLabWebHookCause.DiscardUnknown(m) +} + +var xxx_messageInfo_GitLabWebHookCause proto.InternalMessageInfo + +func (m *GitRefInfo) Reset() { *m = GitRefInfo{} } +func (*GitRefInfo) ProtoMessage() {} +func (*GitRefInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{38} +} +func (m *GitRefInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitRefInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GitRefInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitRefInfo.Merge(m, src) +} +func (m *GitRefInfo) XXX_Size() int { + return m.Size() +} +func (m *GitRefInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GitRefInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GitRefInfo proto.InternalMessageInfo + +func (m *GitSourceRevision) Reset() { *m = GitSourceRevision{} } +func (*GitSourceRevision) ProtoMessage() {} +func (*GitSourceRevision) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{39} +} +func (m *GitSourceRevision) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitSourceRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GitSourceRevision) XXX_Merge(src proto.Message) { + xxx_messageInfo_GitSourceRevision.Merge(m, src) +} +func (m *GitSourceRevision) XXX_Size() int { + return m.Size() +} +func (m *GitSourceRevision) XXX_DiscardUnknown() { + xxx_messageInfo_GitSourceRevision.DiscardUnknown(m) +} + +var xxx_messageInfo_GitSourceRevision proto.InternalMessageInfo + +func (m *ImageChangeCause) Reset() { *m = ImageChangeCause{} } +func (*ImageChangeCause) ProtoMessage() {} +func (*ImageChangeCause) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{40} +} +func (m *ImageChangeCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageChangeCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageChangeCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageChangeCause.Merge(m, src) +} +func (m *ImageChangeCause) XXX_Size() int { + return m.Size() +} +func (m *ImageChangeCause) XXX_DiscardUnknown() { + xxx_messageInfo_ImageChangeCause.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageChangeCause proto.InternalMessageInfo + +func (m *ImageChangeTrigger) Reset() { *m = ImageChangeTrigger{} } +func (*ImageChangeTrigger) ProtoMessage() {} +func (*ImageChangeTrigger) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{41} +} +func (m *ImageChangeTrigger) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageChangeTrigger) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageChangeTrigger) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageChangeTrigger.Merge(m, src) +} +func (m *ImageChangeTrigger) XXX_Size() int { + return m.Size() +} +func (m *ImageChangeTrigger) XXX_DiscardUnknown() { + xxx_messageInfo_ImageChangeTrigger.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageChangeTrigger proto.InternalMessageInfo + +func (m *ImageChangeTriggerStatus) Reset() { *m = ImageChangeTriggerStatus{} } +func (*ImageChangeTriggerStatus) ProtoMessage() {} +func (*ImageChangeTriggerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{42} +} +func (m *ImageChangeTriggerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageChangeTriggerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageChangeTriggerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageChangeTriggerStatus.Merge(m, src) +} +func (m *ImageChangeTriggerStatus) XXX_Size() int { + return m.Size() +} +func (m *ImageChangeTriggerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ImageChangeTriggerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageChangeTriggerStatus proto.InternalMessageInfo + +func (m *ImageLabel) Reset() { *m = ImageLabel{} } +func (*ImageLabel) ProtoMessage() {} +func (*ImageLabel) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{43} +} +func (m *ImageLabel) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageLabel) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageLabel) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageLabel.Merge(m, src) +} +func (m *ImageLabel) XXX_Size() int { + return m.Size() +} +func (m *ImageLabel) XXX_DiscardUnknown() { + xxx_messageInfo_ImageLabel.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageLabel proto.InternalMessageInfo + +func (m *ImageSource) Reset() { *m = ImageSource{} } +func (*ImageSource) ProtoMessage() {} +func (*ImageSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{44} +} +func (m *ImageSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageSource.Merge(m, src) +} +func (m *ImageSource) XXX_Size() int { + return m.Size() +} +func (m *ImageSource) XXX_DiscardUnknown() { + xxx_messageInfo_ImageSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageSource proto.InternalMessageInfo + +func (m *ImageSourcePath) Reset() { *m = ImageSourcePath{} } +func (*ImageSourcePath) ProtoMessage() {} +func (*ImageSourcePath) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{45} +} +func (m *ImageSourcePath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageSourcePath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return 
b[:n], nil +} +func (m *ImageSourcePath) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageSourcePath.Merge(m, src) +} +func (m *ImageSourcePath) XXX_Size() int { + return m.Size() +} +func (m *ImageSourcePath) XXX_DiscardUnknown() { + xxx_messageInfo_ImageSourcePath.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageSourcePath proto.InternalMessageInfo + +func (m *ImageStreamTagReference) Reset() { *m = ImageStreamTagReference{} } +func (*ImageStreamTagReference) ProtoMessage() {} +func (*ImageStreamTagReference) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{46} +} +func (m *ImageStreamTagReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamTagReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamTagReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamTagReference.Merge(m, src) +} +func (m *ImageStreamTagReference) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamTagReference) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamTagReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamTagReference proto.InternalMessageInfo + +func (m *JenkinsPipelineBuildStrategy) Reset() { *m = JenkinsPipelineBuildStrategy{} } +func (*JenkinsPipelineBuildStrategy) ProtoMessage() {} +func (*JenkinsPipelineBuildStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{47} +} +func (m *JenkinsPipelineBuildStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JenkinsPipelineBuildStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JenkinsPipelineBuildStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_JenkinsPipelineBuildStrategy.Merge(m, src) +} +func (m *JenkinsPipelineBuildStrategy) XXX_Size() int { + return m.Size() +} +func (m *JenkinsPipelineBuildStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_JenkinsPipelineBuildStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_JenkinsPipelineBuildStrategy proto.InternalMessageInfo + +func (m *OptionalNodeSelector) Reset() { *m = OptionalNodeSelector{} } +func (*OptionalNodeSelector) ProtoMessage() {} +func (*OptionalNodeSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{48} +} +func (m *OptionalNodeSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OptionalNodeSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OptionalNodeSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_OptionalNodeSelector.Merge(m, src) +} +func (m *OptionalNodeSelector) XXX_Size() int { + return m.Size() +} +func (m *OptionalNodeSelector) XXX_DiscardUnknown() { + xxx_messageInfo_OptionalNodeSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_OptionalNodeSelector proto.InternalMessageInfo + +func (m *ProxyConfig) Reset() { *m = ProxyConfig{} } +func (*ProxyConfig) ProtoMessage() {} +func (*ProxyConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{49} +} +func (m *ProxyConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProxyConfig) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ProxyConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProxyConfig.Merge(m, src) +} +func (m *ProxyConfig) XXX_Size() int { + return m.Size() +} +func (m *ProxyConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ProxyConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ProxyConfig proto.InternalMessageInfo + +func (m *SecretBuildSource) Reset() { *m = SecretBuildSource{} } +func (*SecretBuildSource) ProtoMessage() {} +func (*SecretBuildSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{50} +} +func (m *SecretBuildSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretBuildSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SecretBuildSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretBuildSource.Merge(m, src) +} +func (m *SecretBuildSource) XXX_Size() int { + return m.Size() +} +func (m *SecretBuildSource) XXX_DiscardUnknown() { + xxx_messageInfo_SecretBuildSource.DiscardUnknown(m) +} + +var xxx_messageInfo_SecretBuildSource proto.InternalMessageInfo + +func (m *SecretLocalReference) Reset() { *m = SecretLocalReference{} } +func (*SecretLocalReference) ProtoMessage() {} +func (*SecretLocalReference) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{51} +} +func (m *SecretLocalReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretLocalReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SecretLocalReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretLocalReference.Merge(m, src) +} +func (m *SecretLocalReference) XXX_Size() int { + return m.Size() +} +func (m *SecretLocalReference) XXX_DiscardUnknown() { + xxx_messageInfo_SecretLocalReference.DiscardUnknown(m) +} + +var xxx_messageInfo_SecretLocalReference proto.InternalMessageInfo + +func (m *SecretSpec) Reset() { *m = SecretSpec{} } +func (*SecretSpec) ProtoMessage() {} +func (*SecretSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{52} +} +func (m *SecretSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SecretSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretSpec.Merge(m, src) +} +func (m *SecretSpec) XXX_Size() int { + return m.Size() +} +func (m *SecretSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SecretSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_SecretSpec proto.InternalMessageInfo + +func (m *SourceBuildStrategy) Reset() { *m = SourceBuildStrategy{} } +func (*SourceBuildStrategy) ProtoMessage() {} +func (*SourceBuildStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_2ba579f6f004cb75, []int{53} +} +func (m *SourceBuildStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SourceBuildStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { 
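+ // (Descriptive aside, not generated: err bubbles up from a nested field's
+ // MarshalToSizedBuffer call. The sized-buffer scheme encodes the message
+ // backwards from the end of b; the runtime is expected to hand XXX_Marshal a
+ // slice of exactly m.Size() capacity, so on success the n bytes written fill
+ // the buffer and b[:n] below returns the complete encoding.)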
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SourceBuildStrategy) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SourceBuildStrategy.Merge(m, src)
+}
+func (m *SourceBuildStrategy) XXX_Size() int {
+	return m.Size()
+}
+func (m *SourceBuildStrategy) XXX_DiscardUnknown() {
+	xxx_messageInfo_SourceBuildStrategy.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceBuildStrategy proto.InternalMessageInfo
+
+func (m *SourceControlUser) Reset() { *m = SourceControlUser{} }
+func (*SourceControlUser) ProtoMessage() {}
+func (*SourceControlUser) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{54}
+}
+func (m *SourceControlUser) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SourceControlUser) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SourceControlUser) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SourceControlUser.Merge(m, src)
+}
+func (m *SourceControlUser) XXX_Size() int {
+	return m.Size()
+}
+func (m *SourceControlUser) XXX_DiscardUnknown() {
+	xxx_messageInfo_SourceControlUser.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceControlUser proto.InternalMessageInfo
+
+func (m *SourceRevision) Reset() { *m = SourceRevision{} }
+func (*SourceRevision) ProtoMessage() {}
+func (*SourceRevision) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{55}
+}
+func (m *SourceRevision) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SourceRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SourceRevision) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SourceRevision.Merge(m, src)
+}
+func (m *SourceRevision) XXX_Size() int {
+	return m.Size()
+}
+func (m *SourceRevision) XXX_DiscardUnknown() {
+	xxx_messageInfo_SourceRevision.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceRevision proto.InternalMessageInfo
+
+func (m *SourceStrategyOptions) Reset() { *m = SourceStrategyOptions{} }
+func (*SourceStrategyOptions) ProtoMessage() {}
+func (*SourceStrategyOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{56}
+}
+func (m *SourceStrategyOptions) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SourceStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *SourceStrategyOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SourceStrategyOptions.Merge(m, src)
+}
+func (m *SourceStrategyOptions) XXX_Size() int {
+	return m.Size()
+}
+func (m *SourceStrategyOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_SourceStrategyOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceStrategyOptions proto.InternalMessageInfo
+
+func (m *StageInfo) Reset() { *m = StageInfo{} }
+func (*StageInfo) ProtoMessage() {}
+func (*StageInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{57}
+}
+func (m *StageInfo) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *StageInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *StageInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StageInfo.Merge(m, src)
+}
+func (m *StageInfo) XXX_Size() int {
+	return m.Size()
+}
+func (m *StageInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_StageInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StageInfo proto.InternalMessageInfo
+
+func (m *StepInfo) Reset() { *m = StepInfo{} }
+func (*StepInfo) ProtoMessage() {}
+func (*StepInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{58}
+}
+func (m *StepInfo) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *StepInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *StepInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StepInfo.Merge(m, src)
+}
+func (m *StepInfo) XXX_Size() int {
+	return m.Size()
+}
+func (m *StepInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_StepInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StepInfo proto.InternalMessageInfo
+
+func (m *WebHookTrigger) Reset() { *m = WebHookTrigger{} }
+func (*WebHookTrigger) ProtoMessage() {}
+func (*WebHookTrigger) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2ba579f6f004cb75, []int{59}
+}
+func (m *WebHookTrigger) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WebHookTrigger) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	b = b[:cap(b)]
+	n, err := m.MarshalToSizedBuffer(b)
+	if err != nil {
+		return nil, err
+	}
+	return b[:n], nil
+}
+func (m *WebHookTrigger) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WebHookTrigger.Merge(m, src)
+}
+func (m *WebHookTrigger) XXX_Size() int {
+	return m.Size()
+}
+func (m *WebHookTrigger) XXX_DiscardUnknown() {
+	xxx_messageInfo_WebHookTrigger.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WebHookTrigger proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterType((*BinaryBuildRequestOptions)(nil), "github.com.openshift.api.build.v1.BinaryBuildRequestOptions")
+	proto.RegisterType((*BinaryBuildSource)(nil), "github.com.openshift.api.build.v1.BinaryBuildSource")
+	proto.RegisterType((*BitbucketWebHookCause)(nil), "github.com.openshift.api.build.v1.BitbucketWebHookCause")
+	proto.RegisterType((*Build)(nil), "github.com.openshift.api.build.v1.Build")
+	proto.RegisterType((*BuildCondition)(nil), "github.com.openshift.api.build.v1.BuildCondition")
+	proto.RegisterType((*BuildConfig)(nil), "github.com.openshift.api.build.v1.BuildConfig")
+	proto.RegisterType((*BuildConfigList)(nil), "github.com.openshift.api.build.v1.BuildConfigList")
+	proto.RegisterType((*BuildConfigSpec)(nil), "github.com.openshift.api.build.v1.BuildConfigSpec")
+	proto.RegisterType((*BuildConfigStatus)(nil), "github.com.openshift.api.build.v1.BuildConfigStatus")
+	proto.RegisterType((*BuildList)(nil), "github.com.openshift.api.build.v1.BuildList")
+	proto.RegisterType((*BuildLog)(nil), "github.com.openshift.api.build.v1.BuildLog")
+	proto.RegisterType((*BuildLogOptions)(nil), "github.com.openshift.api.build.v1.BuildLogOptions")
+	proto.RegisterType((*BuildOutput)(nil), "github.com.openshift.api.build.v1.BuildOutput")
+	proto.RegisterType((*BuildPostCommitSpec)(nil), "github.com.openshift.api.build.v1.BuildPostCommitSpec")
+	proto.RegisterType((*BuildRequest)(nil), "github.com.openshift.api.build.v1.BuildRequest")
+	proto.RegisterType((*BuildSource)(nil), "github.com.openshift.api.build.v1.BuildSource")
+	proto.RegisterType((*BuildSpec)(nil), "github.com.openshift.api.build.v1.BuildSpec")
+	proto.RegisterType((*BuildStatus)(nil), "github.com.openshift.api.build.v1.BuildStatus")
+	proto.RegisterType((*BuildStatusOutput)(nil), "github.com.openshift.api.build.v1.BuildStatusOutput")
+	proto.RegisterType((*BuildStatusOutputTo)(nil), "github.com.openshift.api.build.v1.BuildStatusOutputTo")
+	proto.RegisterType((*BuildStrategy)(nil), "github.com.openshift.api.build.v1.BuildStrategy")
+	proto.RegisterType((*BuildTriggerCause)(nil), "github.com.openshift.api.build.v1.BuildTriggerCause")
+	proto.RegisterType((*BuildTriggerPolicy)(nil), "github.com.openshift.api.build.v1.BuildTriggerPolicy")
+	proto.RegisterType((*BuildVolume)(nil), "github.com.openshift.api.build.v1.BuildVolume")
+	proto.RegisterType((*BuildVolumeMount)(nil), "github.com.openshift.api.build.v1.BuildVolumeMount")
+	proto.RegisterType((*BuildVolumeSource)(nil), "github.com.openshift.api.build.v1.BuildVolumeSource")
+	proto.RegisterType((*CommonSpec)(nil), "github.com.openshift.api.build.v1.CommonSpec")
+	proto.RegisterType((*CommonWebHookCause)(nil), "github.com.openshift.api.build.v1.CommonWebHookCause")
+	proto.RegisterType((*ConfigMapBuildSource)(nil), "github.com.openshift.api.build.v1.ConfigMapBuildSource")
+	proto.RegisterType((*CustomBuildStrategy)(nil), "github.com.openshift.api.build.v1.CustomBuildStrategy")
+	proto.RegisterType((*DockerBuildStrategy)(nil), "github.com.openshift.api.build.v1.DockerBuildStrategy")
+	proto.RegisterType((*DockerStrategyOptions)(nil), "github.com.openshift.api.build.v1.DockerStrategyOptions")
+	proto.RegisterType((*GenericWebHookCause)(nil), "github.com.openshift.api.build.v1.GenericWebHookCause")
+	proto.RegisterType((*GenericWebHookEvent)(nil), "github.com.openshift.api.build.v1.GenericWebHookEvent")
+	proto.RegisterType((*GitBuildSource)(nil), "github.com.openshift.api.build.v1.GitBuildSource")
+	proto.RegisterType((*GitHubWebHookCause)(nil), "github.com.openshift.api.build.v1.GitHubWebHookCause")
+	proto.RegisterType((*GitInfo)(nil), "github.com.openshift.api.build.v1.GitInfo")
+	proto.RegisterType((*GitLabWebHookCause)(nil), "github.com.openshift.api.build.v1.GitLabWebHookCause")
+	proto.RegisterType((*GitRefInfo)(nil), "github.com.openshift.api.build.v1.GitRefInfo")
+	proto.RegisterType((*GitSourceRevision)(nil), "github.com.openshift.api.build.v1.GitSourceRevision")
+	proto.RegisterType((*ImageChangeCause)(nil), "github.com.openshift.api.build.v1.ImageChangeCause")
+	proto.RegisterType((*ImageChangeTrigger)(nil), "github.com.openshift.api.build.v1.ImageChangeTrigger")
+	proto.RegisterType((*ImageChangeTriggerStatus)(nil), "github.com.openshift.api.build.v1.ImageChangeTriggerStatus")
+	proto.RegisterType((*ImageLabel)(nil), "github.com.openshift.api.build.v1.ImageLabel")
+	proto.RegisterType((*ImageSource)(nil), "github.com.openshift.api.build.v1.ImageSource")
+	proto.RegisterType((*ImageSourcePath)(nil), "github.com.openshift.api.build.v1.ImageSourcePath")
+	proto.RegisterType((*ImageStreamTagReference)(nil), "github.com.openshift.api.build.v1.ImageStreamTagReference")
+	proto.RegisterType((*JenkinsPipelineBuildStrategy)(nil), "github.com.openshift.api.build.v1.JenkinsPipelineBuildStrategy")
+	proto.RegisterType((*OptionalNodeSelector)(nil), "github.com.openshift.api.build.v1.OptionalNodeSelector")
+	proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.build.v1.OptionalNodeSelector.ItemsEntry")
+	proto.RegisterType((*ProxyConfig)(nil), "github.com.openshift.api.build.v1.ProxyConfig")
+	proto.RegisterType((*SecretBuildSource)(nil),
"github.com.openshift.api.build.v1.SecretBuildSource") + proto.RegisterType((*SecretLocalReference)(nil), "github.com.openshift.api.build.v1.SecretLocalReference") + proto.RegisterType((*SecretSpec)(nil), "github.com.openshift.api.build.v1.SecretSpec") + proto.RegisterType((*SourceBuildStrategy)(nil), "github.com.openshift.api.build.v1.SourceBuildStrategy") + proto.RegisterType((*SourceControlUser)(nil), "github.com.openshift.api.build.v1.SourceControlUser") + proto.RegisterType((*SourceRevision)(nil), "github.com.openshift.api.build.v1.SourceRevision") + proto.RegisterType((*SourceStrategyOptions)(nil), "github.com.openshift.api.build.v1.SourceStrategyOptions") + proto.RegisterType((*StageInfo)(nil), "github.com.openshift.api.build.v1.StageInfo") + proto.RegisterType((*StepInfo)(nil), "github.com.openshift.api.build.v1.StepInfo") + proto.RegisterType((*WebHookTrigger)(nil), "github.com.openshift.api.build.v1.WebHookTrigger") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/build/v1/generated.proto", fileDescriptor_2ba579f6f004cb75) +} + +var fileDescriptor_2ba579f6f004cb75 = []byte{ + // 4386 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5c, 0x4d, 0x6c, 0x1c, 0x47, + 0x76, 0x56, 0xcf, 0x0f, 0x67, 0xe6, 0x0d, 0x45, 0x52, 0x45, 0xc9, 0x1a, 0x69, 0xb5, 0x1c, 0xb9, + 0x1d, 0x1b, 0x76, 0x6c, 0x0f, 0x57, 0xb2, 0xa4, 0xc8, 0x36, 0xe2, 0x80, 0x43, 0x52, 0x32, 0xb5, + 0x23, 0x89, 0xa8, 0xa1, 0x65, 0xef, 0x5a, 0xd8, 0xa4, 0xd9, 0x53, 0x33, 0x6c, 0x73, 0xa6, 0x7b, + 0xdc, 0xd5, 0x43, 0x9b, 0x0b, 0x04, 0x58, 0x04, 0x58, 0x24, 0xeb, 0xbd, 0x64, 0x2f, 0x8b, 0x24, + 0x97, 0x24, 0x58, 0xe4, 0x94, 0x53, 0x02, 0x04, 0xd8, 0x60, 0x2f, 0x01, 0xb2, 0x07, 0x1f, 0x12, + 0x60, 0x83, 0x04, 0x88, 0x81, 0x5d, 0x0c, 0x62, 0xe6, 0x10, 0x20, 0x87, 0x00, 0xb9, 0xea, 0x10, + 0x04, 0xf5, 0xd3, 0xdd, 0x55, 0x3d, 0x3d, 0x54, 0x0f, 0x25, 0x3b, 0x9b, 0xe4, 0xc6, 0xa9, 0xf7, + 0xde, 0xf7, 0xea, 0xe7, 0xd5, 0xab, 0xf7, 0x5e, 0x55, 0x13, 0xae, 0xf4, 0x9c, 0x60, 0x6f, 0xb4, + 0xdb, 0xb0, 0xbd, 0xc1, 0xaa, 0x37, 0x24, 0x2e, 0xdd, 0x73, 0xba, 0xc1, 0xaa, 0x35, 0x74, 0x56, + 0x77, 0x47, 0x4e, 0xbf, 0xb3, 0x7a, 0x70, 0x65, 0xb5, 0x47, 0x5c, 0xe2, 0x5b, 0x01, 0xe9, 0x34, + 0x86, 0xbe, 0x17, 0x78, 0xe8, 0xd9, 0x58, 0xa4, 0x11, 0x89, 0x34, 0xac, 0xa1, 0xd3, 0xe0, 0x22, + 0x8d, 0x83, 0x2b, 0x17, 0x5f, 0x55, 0x50, 0x7b, 0x5e, 0xcf, 0x5b, 0xe5, 0x92, 0xbb, 0xa3, 0x2e, + 0xff, 0xc5, 0x7f, 0xf0, 0xbf, 0x04, 0xe2, 0x45, 0x73, 0xff, 0x26, 0x6d, 0x38, 0x1e, 0x57, 0x6b, + 0x7b, 0x3e, 0x49, 0xd1, 0x7a, 0xf1, 0x5a, 0xcc, 0x33, 0xb0, 0xec, 0x3d, 0xc7, 0x25, 0xfe, 0xe1, + 0xea, 0x70, 0xbf, 0xc7, 0x1a, 0xe8, 0xea, 0x80, 0x04, 0x56, 0x9a, 0xd4, 0x8d, 0x69, 0x52, 0xfe, + 0xc8, 0x0d, 0x9c, 0x01, 0x59, 0xa5, 0xf6, 0x1e, 0x19, 0x58, 0x49, 0x39, 0xf3, 0x6f, 0x0a, 0x70, + 0xa1, 0xe9, 0xb8, 0x96, 0x7f, 0xd8, 0x64, 0x63, 0xc2, 0xe4, 0xc3, 0x11, 0xa1, 0xc1, 0xfd, 0x61, + 0xe0, 0x78, 0x2e, 0x45, 0xbf, 0x05, 0x65, 0xa6, 0xb0, 0x63, 0x05, 0x56, 0xcd, 0xb8, 0x6c, 0xbc, + 0x58, 0xbd, 0xfa, 0xb5, 0x86, 0x50, 0xd4, 0x50, 0x15, 0x35, 0x86, 0xfb, 0x3d, 0xd6, 0x40, 0x1b, + 0x8c, 0xbb, 0x71, 0x70, 0xa5, 0x71, 0x7f, 0xf7, 0x03, 0x62, 0x07, 0x77, 0x49, 0x60, 0x35, 0xd1, + 0xa7, 0xe3, 0xfa, 0xa9, 0xa3, 0x71, 0x1d, 0xe2, 0x36, 0x1c, 0xa1, 0xa2, 0x17, 0x60, 0xce, 0xa2, + 0xb7, 0x9c, 0x3e, 0xa9, 0xe5, 0x2e, 0x1b, 0x2f, 0x56, 0x9a, 0x0b, 0x92, 0x7b, 0x6e, 0x8d, 0xb7, + 0x62, 0x49, 0x45, 0x37, 0x60, 0xc1, 0x27, 0x07, 0x0e, 0x75, 0x3c, 0x77, 0xdd, 0x1b, 0x0c, 0x9c, + 0xa0, 0x96, 0xd7, 0xf9, 0x45, 
0x2b, 0x4e, 0x70, 0xa1, 0xd7, 0x61, 0x31, 0x6c, 0xb9, 0x4b, 0x28, + 0xb5, 0x7a, 0xa4, 0x56, 0xe0, 0x82, 0x8b, 0x52, 0xb0, 0x24, 0x9b, 0x71, 0x92, 0x0f, 0x35, 0x01, + 0x85, 0x4d, 0x6b, 0xa3, 0x60, 0xcf, 0xf3, 0xef, 0x59, 0x03, 0x52, 0x2b, 0x72, 0xe9, 0x68, 0x50, + 0x31, 0x05, 0xa7, 0x70, 0xa3, 0x4d, 0x58, 0xd6, 0x5b, 0x37, 0x07, 0x96, 0xd3, 0xaf, 0xcd, 0x71, + 0x90, 0x65, 0x09, 0x52, 0x55, 0x48, 0x38, 0x8d, 0x1f, 0x7d, 0x1d, 0xce, 0xe9, 0xe3, 0x0a, 0x88, + 0xe8, 0x4d, 0x89, 0x03, 0x9d, 0x93, 0x40, 0xa7, 0x35, 0x22, 0x4e, 0x97, 0x41, 0xf7, 0xe0, 0x99, + 0x09, 0x82, 0xe8, 0x56, 0x99, 0xa3, 0x3d, 0x23, 0xd1, 0x16, 0x74, 0x2a, 0x9e, 0x22, 0x65, 0xbe, + 0x09, 0x67, 0x14, 0x0b, 0x6a, 0x7b, 0x23, 0xdf, 0x26, 0xca, 0xba, 0x1a, 0xc7, 0xad, 0xab, 0xf9, + 0x89, 0x01, 0xe7, 0x9a, 0x4e, 0xb0, 0x3b, 0xb2, 0xf7, 0x49, 0xf0, 0x2e, 0xd9, 0x7d, 0xdb, 0xf3, + 0xf6, 0xd7, 0xad, 0x11, 0x25, 0xe8, 0x43, 0x00, 0xdb, 0x1b, 0x0c, 0x3c, 0xb7, 0x3d, 0x24, 0xb6, + 0xb4, 0xbe, 0xeb, 0x8d, 0xc7, 0x6e, 0xc9, 0xc6, 0x3a, 0x17, 0x52, 0xa1, 0x9a, 0x17, 0xa5, 0x72, + 0x34, 0x49, 0xc3, 0x8a, 0x12, 0xf3, 0x07, 0x39, 0x28, 0xf2, 0x41, 0x7c, 0x09, 0x86, 0x7f, 0x0f, + 0x0a, 0x94, 0x0d, 0x2c, 0xc7, 0xd1, 0x5f, 0xc9, 0x30, 0x30, 0x31, 0xbd, 0x43, 0x62, 0x37, 0xe7, + 0x25, 0x72, 0x81, 0xfd, 0xc2, 0x1c, 0x07, 0x3d, 0x80, 0x39, 0x1a, 0x58, 0xc1, 0x88, 0xf2, 0x8d, + 0x51, 0xbd, 0xda, 0xc8, 0x8c, 0xc8, 0xa5, 0xe2, 0x05, 0x12, 0xbf, 0xb1, 0x44, 0x33, 0xff, 0x3e, + 0x0f, 0x0b, 0x9c, 0x6f, 0xdd, 0x73, 0x3b, 0x0e, 0x73, 0x0b, 0xe8, 0x06, 0x14, 0x82, 0xc3, 0x61, + 0xb8, 0xb2, 0x66, 0xd8, 0x99, 0x9d, 0xc3, 0x21, 0x79, 0x34, 0xae, 0x23, 0x9d, 0x9b, 0xb5, 0x62, + 0xce, 0x8f, 0x5a, 0x51, 0x17, 0xc5, 0x5e, 0xbf, 0xa6, 0xab, 0x7c, 0x34, 0xae, 0xa7, 0xf8, 0xc7, + 0x46, 0x84, 0xa4, 0x77, 0x0c, 0x7d, 0x00, 0x0b, 0x7d, 0x8b, 0x06, 0xef, 0x0c, 0x3b, 0x56, 0x40, + 0x76, 0x9c, 0x01, 0xe1, 0xbb, 0xaa, 0x7a, 0xf5, 0x57, 0xb3, 0x2d, 0x14, 0x93, 0x88, 0x4d, 0xbd, + 0xa5, 0x21, 0xe1, 0x04, 0x32, 0x3a, 0x00, 0xc4, 0x5a, 0x76, 0x7c, 0xcb, 0xa5, 0x62, 0x54, 0x4c, + 0x5f, 0x7e, 0x66, 0x7d, 0x91, 0x21, 0xb6, 0x26, 0xd0, 0x70, 0x8a, 0x06, 0xb6, 0x8b, 0x7c, 0x62, + 0x51, 0xcf, 0x95, 0x4e, 0x2b, 0x5a, 0x24, 0xcc, 0x5b, 0xb1, 0xa4, 0xa2, 0x97, 0xa0, 0x34, 0x90, + 0xde, 0xad, 0x98, 0xee, 0xdd, 0x42, 0xba, 0xf9, 0xa3, 0x1c, 0x54, 0xc3, 0x15, 0xea, 0x3a, 0xbd, + 0x2f, 0xc1, 0xd2, 0x77, 0x34, 0x4b, 0xbf, 0x9a, 0xd5, 0x2e, 0x45, 0xff, 0xa6, 0xda, 0xfb, 0xc3, + 0x84, 0xbd, 0x5f, 0x9b, 0x11, 0xf7, 0x78, 0xab, 0xff, 0xa9, 0x01, 0x8b, 0x0a, 0x77, 0xcb, 0xa1, + 0x01, 0x7a, 0x38, 0x31, 0x53, 0x8d, 0x6c, 0x33, 0xc5, 0xa4, 0xf9, 0x3c, 0x2d, 0x49, 0x6d, 0xe5, + 0xb0, 0x45, 0x99, 0xa5, 0x36, 0x14, 0x9d, 0x80, 0x0c, 0xd8, 0xde, 0xc8, 0xcf, 0xb2, 0x7d, 0x45, + 0x07, 0x9b, 0xa7, 0x25, 0x74, 0x71, 0x8b, 0x81, 0x60, 0x81, 0x65, 0xfe, 0x22, 0xaf, 0x0d, 0x83, + 0x4d, 0x1f, 0xb2, 0xa1, 0x1c, 0xf8, 0x4e, 0xaf, 0x47, 0x7c, 0x5a, 0x33, 0xb8, 0xae, 0xeb, 0x59, + 0x75, 0xed, 0x08, 0xb9, 0x6d, 0xaf, 0xef, 0xd8, 0x87, 0xf1, 0x68, 0x64, 0x33, 0xc5, 0x11, 0x30, + 0x5a, 0x83, 0x8a, 0x3f, 0x72, 0x05, 0xa3, 0xdc, 0xed, 0xcf, 0x49, 0xf6, 0x0a, 0x0e, 0x09, 0x8f, + 0xc6, 0x75, 0xe1, 0x5a, 0xa2, 0x16, 0x1c, 0x4b, 0x21, 0x4b, 0xf3, 0xff, 0x62, 0x91, 0x5f, 0xcd, + 0xec, 0xff, 0xb9, 0xdd, 0x44, 0x76, 0x19, 0xb7, 0xa9, 0xfe, 0x1e, 0x75, 0xe0, 0x12, 0x1d, 0xd9, + 0x36, 0xa1, 0xb4, 0x3b, 0xea, 0xf3, 0x9e, 0xd0, 0xb7, 0x1d, 0x1a, 0x78, 0xfe, 0x61, 0xcb, 0x61, + 0x21, 0x06, 0xdb, 0x74, 0xc5, 0xe6, 0xe5, 0xa3, 0x71, 0xfd, 0x52, 0xfb, 0x18, 0x3e, 0x7c, 0x2c, + 0x0a, 0x7a, 0x0f, 0x6a, 0x5d, 0xcb, 0xe9, 0x93, 0x4e, 
0x8a, 0x86, 0x22, 0xd7, 0x70, 0xe9, 0x68, + 0x5c, 0xaf, 0xdd, 0x9a, 0xc2, 0x83, 0xa7, 0x4a, 0x9b, 0xff, 0x6c, 0xc0, 0x99, 0x09, 0x9b, 0x46, + 0xd7, 0xa1, 0xca, 0x5c, 0xc9, 0x03, 0xe2, 0xb3, 0xc3, 0x9a, 0x9b, 0x6a, 0x3e, 0x8e, 0x35, 0x5a, + 0x31, 0x09, 0xab, 0x7c, 0xe8, 0x13, 0x03, 0x96, 0x9d, 0x81, 0xd5, 0x23, 0xeb, 0x7b, 0x96, 0xdb, + 0x23, 0xe1, 0xa2, 0x4a, 0x7b, 0x7c, 0x33, 0xc3, 0xcc, 0x6f, 0x4d, 0x48, 0xcb, 0x5d, 0xf6, 0x15, + 0xa9, 0x7c, 0x79, 0x92, 0x83, 0xe2, 0x34, 0xa5, 0xe6, 0x8f, 0x0d, 0xa8, 0xf0, 0x91, 0x7d, 0x09, + 0x3b, 0xef, 0xae, 0xbe, 0xf3, 0x5e, 0xcc, 0xba, 0x1b, 0xa6, 0xec, 0x39, 0x80, 0xb2, 0xe8, 0xb9, + 0xd7, 0x33, 0xff, 0xb3, 0x20, 0xf7, 0x5f, 0xcb, 0xeb, 0x85, 0x31, 0xf5, 0x2a, 0x54, 0x6c, 0xcf, + 0x0d, 0x2c, 0xd6, 0x65, 0x79, 0x84, 0x9e, 0x09, 0xb7, 0xc6, 0x7a, 0x48, 0xc0, 0x31, 0x0f, 0x3b, + 0x04, 0xba, 0x5e, 0xbf, 0xef, 0x7d, 0xc4, 0x37, 0x52, 0x39, 0xf6, 0x59, 0xb7, 0x78, 0x2b, 0x96, + 0x54, 0xf4, 0x0a, 0x94, 0x87, 0x2c, 0x44, 0xf3, 0xa4, 0x4f, 0x2c, 0xc7, 0xa3, 0xde, 0x96, 0xed, + 0x38, 0xe2, 0x40, 0xd7, 0x60, 0x9e, 0x3a, 0xae, 0x4d, 0xda, 0xc4, 0xf6, 0xdc, 0x0e, 0xe5, 0xb6, + 0x9e, 0x6f, 0x2e, 0x1d, 0x8d, 0xeb, 0xf3, 0x6d, 0xa5, 0x1d, 0x6b, 0x5c, 0xe8, 0x5d, 0xa8, 0xf0, + 0xdf, 0xfc, 0xfc, 0x2b, 0xce, 0x7c, 0xfe, 0x9d, 0x66, 0x83, 0x6c, 0x87, 0x00, 0x38, 0xc6, 0x42, + 0x57, 0x01, 0x58, 0x9a, 0x42, 0x03, 0x6b, 0x30, 0xa4, 0xfc, 0x24, 0x2f, 0xc7, 0xdb, 0x77, 0x27, + 0xa2, 0x60, 0x85, 0x0b, 0xbd, 0x0c, 0x95, 0xc0, 0x72, 0xfa, 0x2d, 0xc7, 0x25, 0x94, 0x47, 0xc2, + 0x79, 0xa1, 0x60, 0x27, 0x6c, 0xc4, 0x31, 0x1d, 0x35, 0x00, 0xfa, 0x6c, 0xd3, 0x34, 0x0f, 0x03, + 0x42, 0x79, 0xa4, 0x9b, 0x6f, 0x2e, 0x30, 0xf0, 0x56, 0xd4, 0x8a, 0x15, 0x0e, 0x36, 0xeb, 0xae, + 0xf7, 0x91, 0xe5, 0x04, 0xb5, 0x8a, 0x3e, 0xeb, 0xf7, 0xbc, 0x77, 0x2d, 0x27, 0xc0, 0x92, 0x8a, + 0x9e, 0x87, 0xd2, 0x81, 0xdc, 0x69, 0xc0, 0x41, 0xab, 0xec, 0xd8, 0x0d, 0x77, 0x58, 0x48, 0x43, + 0x7b, 0x70, 0xc9, 0x71, 0x29, 0xb1, 0x47, 0x3e, 0x69, 0xef, 0x3b, 0xc3, 0x9d, 0x56, 0xfb, 0x01, + 0xf1, 0x9d, 0xee, 0x61, 0xd3, 0xb2, 0xf7, 0x89, 0xdb, 0xa9, 0x55, 0xb9, 0x92, 0x5f, 0x91, 0x4a, + 0x2e, 0x6d, 0x1d, 0xc3, 0x8b, 0x8f, 0x45, 0x32, 0x3f, 0x09, 0x0f, 0xf8, 0xfb, 0xa3, 0x60, 0x38, + 0x0a, 0xd0, 0x9b, 0x90, 0x0b, 0x3c, 0xb9, 0x6d, 0x9e, 0x53, 0xd6, 0xaa, 0xc1, 0x02, 0xac, 0xf8, + 0x20, 0xc7, 0xa4, 0x4b, 0x7c, 0xe2, 0xda, 0xa4, 0x39, 0x77, 0x34, 0xae, 0xe7, 0x76, 0x3c, 0x9c, + 0x0b, 0x3c, 0xf4, 0x1e, 0xc0, 0x70, 0x44, 0xf7, 0xda, 0xc4, 0xf6, 0x49, 0x20, 0x4f, 0xf0, 0x17, + 0xd3, 0x40, 0x5a, 0x9e, 0x6d, 0xf5, 0x93, 0x48, 0x7c, 0x7e, 0xb7, 0x23, 0x79, 0xac, 0x60, 0xa1, + 0x0e, 0x54, 0xf9, 0xc6, 0x6f, 0x59, 0xbb, 0xa4, 0xcf, 0x0c, 0x36, 0x9f, 0xd1, 0xbf, 0x6f, 0x45, + 0x52, 0xb1, 0x53, 0x8b, 0xdb, 0x28, 0x56, 0x61, 0xcd, 0xdf, 0x31, 0x60, 0x99, 0x4f, 0xc6, 0xb6, + 0x47, 0x03, 0x91, 0xb7, 0x70, 0xcf, 0xff, 0x3c, 0x94, 0xd8, 0x39, 0x60, 0xb9, 0x1d, 0x7e, 0x06, + 0x56, 0xc4, 0xaa, 0xad, 0x8b, 0x26, 0x1c, 0xd2, 0xd0, 0x25, 0x28, 0x58, 0x7e, 0x4f, 0x78, 0x86, + 0x4a, 0xb3, 0xcc, 0x42, 0x90, 0x35, 0xbf, 0x47, 0x31, 0x6f, 0x65, 0x26, 0x42, 0x6d, 0xdf, 0x19, + 0x4e, 0xe4, 0xa2, 0x6d, 0xde, 0x8a, 0x25, 0xd5, 0xfc, 0x69, 0x09, 0xe6, 0xd5, 0xec, 0xfa, 0x4b, + 0x88, 0xb9, 0xde, 0x87, 0x72, 0x98, 0xad, 0xc9, 0x55, 0xbb, 0x92, 0x61, 0x6a, 0x45, 0xee, 0x86, + 0xa5, 0x60, 0x73, 0x9e, 0xb9, 0x8e, 0xf0, 0x17, 0x8e, 0x00, 0x11, 0x81, 0x25, 0x79, 0xd0, 0x93, + 0x4e, 0xf3, 0x90, 0xcf, 0xbd, 0x3c, 0x9f, 0x33, 0xd9, 0xd7, 0xd9, 0xa3, 0x71, 0x7d, 0x69, 0x27, + 0x01, 0x80, 0x27, 0x20, 0xd1, 0x1a, 0x14, 0xba, 0xbe, 0x37, 0xe0, 0x9e, 0x29, 
0x23, 0x34, 0x5f, + 0xa1, 0x5b, 0xbe, 0x37, 0xc0, 0x5c, 0x14, 0xbd, 0x07, 0x73, 0xbb, 0x3c, 0x35, 0x95, 0xbe, 0x2a, + 0x53, 0x90, 0x98, 0xcc, 0x65, 0x9b, 0xc0, 0xd6, 0x54, 0x34, 0x63, 0x89, 0x87, 0xae, 0xe8, 0x87, + 0xec, 0x1c, 0xdf, 0xfa, 0x8b, 0xc7, 0x1e, 0xb0, 0xaf, 0x43, 0x9e, 0xb8, 0x07, 0xb5, 0x12, 0xb7, + 0xf4, 0x8b, 0x69, 0xc3, 0xd9, 0x74, 0x0f, 0x1e, 0x58, 0x7e, 0xb3, 0x2a, 0x97, 0x36, 0xbf, 0xe9, + 0x1e, 0x60, 0x26, 0x83, 0xf6, 0xa1, 0xaa, 0x4c, 0x4f, 0xad, 0xcc, 0x21, 0xae, 0xcd, 0x18, 0xb6, + 0x89, 0x5c, 0x38, 0xda, 0x33, 0xca, 0x0a, 0x60, 0x15, 0x1d, 0x7d, 0xcf, 0x80, 0x73, 0x1d, 0xcf, + 0xde, 0x67, 0xc7, 0xb7, 0x6f, 0x05, 0xa4, 0x77, 0x28, 0x8f, 0x2e, 0xee, 0x09, 0xab, 0x57, 0x6f, + 0x66, 0xd0, 0xbb, 0x91, 0x26, 0xdf, 0xbc, 0x70, 0x34, 0xae, 0x9f, 0x4b, 0x25, 0xe1, 0x74, 0x8d, + 0xbc, 0x2f, 0x94, 0xaf, 0x42, 0xb2, 0x2f, 0x90, 0xb9, 0x2f, 0xed, 0x34, 0x79, 0xd1, 0x97, 0x54, + 0x12, 0x4e, 0xd7, 0x68, 0xfe, 0x53, 0x51, 0x3a, 0x56, 0x59, 0xe2, 0x78, 0x4d, 0x4b, 0x83, 0xeb, + 0x89, 0x34, 0x78, 0x51, 0x61, 0x55, 0x72, 0xe0, 0xd8, 0x22, 0x73, 0x4f, 0xd9, 0x22, 0x1b, 0x00, + 0x62, 0x0e, 0xbb, 0x4e, 0x9f, 0x84, 0x1e, 0x89, 0x39, 0x88, 0x8d, 0xa8, 0x15, 0x2b, 0x1c, 0xa8, + 0x05, 0xf9, 0x9e, 0x8c, 0x71, 0xb3, 0x79, 0x87, 0xdb, 0x4e, 0xa0, 0xf6, 0xa1, 0xc4, 0x2c, 0xf4, + 0xb6, 0x13, 0x60, 0x06, 0x83, 0x1e, 0xc0, 0x1c, 0xf7, 0xbb, 0xb4, 0x56, 0xcc, 0x9c, 0xbf, 0xf0, + 0x6d, 0x2e, 0xd1, 0x22, 0xdf, 0xc9, 0x1b, 0x29, 0x96, 0x68, 0x2c, 0x2e, 0x60, 0x91, 0x10, 0xf9, + 0x38, 0xd8, 0x70, 0x7c, 0x59, 0x37, 0x53, 0xc2, 0xfa, 0x90, 0x82, 0x15, 0x2e, 0xf4, 0x2d, 0x98, + 0x97, 0x2b, 0x28, 0x8e, 0xad, 0xd2, 0x8c, 0xc7, 0x96, 0x08, 0x82, 0x14, 0x04, 0xac, 0xe1, 0xa1, + 0xdf, 0x84, 0x12, 0xe5, 0x7f, 0xd1, 0x19, 0x76, 0xa2, 0x90, 0x55, 0x27, 0x30, 0xca, 0xd1, 0x05, + 0x89, 0xe2, 0x10, 0x15, 0xed, 0xf3, 0x41, 0x77, 0x9d, 0xde, 0x5d, 0x6b, 0xc8, 0x76, 0x1d, 0xd3, + 0xf1, 0x6b, 0x99, 0x52, 0x1f, 0x29, 0xa4, 0xaa, 0x51, 0x67, 0x4b, 0x42, 0x62, 0x05, 0xde, 0xfc, + 0x79, 0x18, 0x6a, 0xf3, 0x83, 0xd1, 0x4a, 0xa9, 0xba, 0x3d, 0xe5, 0xac, 0x2b, 0xe1, 0xcc, 0x72, + 0x5f, 0xa4, 0x33, 0x33, 0xff, 0xa3, 0x14, 0x6e, 0x5a, 0x91, 0x1c, 0x5d, 0x81, 0xe2, 0x70, 0xcf, + 0xa2, 0xe1, 0xae, 0x0d, 0x33, 0x93, 0xe2, 0x36, 0x6b, 0x7c, 0x34, 0xae, 0x83, 0x88, 0x16, 0xd8, + 0x2f, 0x2c, 0x38, 0x79, 0xc0, 0x6e, 0xb9, 0x36, 0xe9, 0xf7, 0x49, 0x47, 0x86, 0xe0, 0x71, 0xc0, + 0x1e, 0x12, 0x70, 0xcc, 0x83, 0x6e, 0x44, 0x55, 0x1b, 0xb1, 0x0b, 0x57, 0xf4, 0xaa, 0xcd, 0x23, + 0x66, 0x5d, 0xa2, 0xdc, 0x30, 0xb5, 0x8a, 0x53, 0x38, 0xbe, 0x8a, 0x83, 0xba, 0xb0, 0x40, 0x03, + 0xcb, 0x0f, 0xa2, 0xc8, 0xf8, 0x04, 0xc1, 0x38, 0x3a, 0x1a, 0xd7, 0x17, 0xda, 0x1a, 0x0a, 0x4e, + 0xa0, 0xa2, 0x11, 0x2c, 0xdb, 0xde, 0x60, 0xd8, 0x27, 0x61, 0x49, 0x4a, 0x28, 0x9b, 0xbd, 0xd2, + 0x76, 0x9e, 0xa5, 0x7f, 0xeb, 0x93, 0x50, 0x38, 0x0d, 0x1f, 0xfd, 0x3a, 0x94, 0x3b, 0x23, 0xdf, + 0x62, 0x8d, 0x32, 0xb0, 0x7f, 0x36, 0x4c, 0x65, 0x36, 0x64, 0xfb, 0xa3, 0x71, 0xfd, 0x34, 0xcb, + 0x05, 0x1a, 0x61, 0x03, 0x8e, 0x44, 0xd0, 0x2e, 0x5c, 0xf4, 0x78, 0xf0, 0x2b, 0x5c, 0x9f, 0x08, + 0x30, 0xc2, 0xed, 0x2d, 0xab, 0xdc, 0x61, 0xd9, 0xf2, 0xe2, 0xfd, 0xa9, 0x9c, 0xf8, 0x18, 0x14, + 0x74, 0x1b, 0xe6, 0xc4, 0x26, 0x92, 0xa7, 0x62, 0xa6, 0xf8, 0x04, 0xc4, 0x4d, 0x05, 0x13, 0xc3, + 0x52, 0x1c, 0x3d, 0x84, 0x39, 0xa1, 0x46, 0x1e, 0x69, 0xd7, 0x66, 0x2b, 0xdc, 0x8a, 0xee, 0xc7, + 0xfe, 0x53, 0xfc, 0xc6, 0x12, 0x13, 0xed, 0xf0, 0x32, 0x19, 0xf3, 0xcb, 0x55, 0xbe, 0xcf, 0xb2, + 0x14, 0x9a, 0xdb, 0x4c, 0x60, 0xcb, 0xed, 0x7a, 0x5a, 0x79, 0x8c, 0x7b, 0x65, 0x81, 0xc5, 0xbc, + 0x72, 
0xdf, 0xeb, 0xb5, 0x5d, 0x67, 0x38, 0x24, 0x41, 0x6d, 0x5e, 0xf7, 0xca, 0xad, 0x88, 0x82, + 0x15, 0x2e, 0x44, 0xb8, 0x53, 0x13, 0xa5, 0x5c, 0x5a, 0x3b, 0xcd, 0x7b, 0x73, 0x65, 0x86, 0x2a, + 0x97, 0x90, 0xd4, 0xdc, 0x99, 0x04, 0xc3, 0x0a, 0xb0, 0x69, 0xcb, 0x92, 0x88, 0x3a, 0x3b, 0xe8, + 0x9e, 0x92, 0x03, 0xdd, 0x38, 0xc9, 0xfc, 0xee, 0x78, 0x6a, 0x5a, 0x64, 0xb6, 0x64, 0x56, 0xa1, + 0xb3, 0xa0, 0xeb, 0x32, 0xa7, 0xd9, 0x70, 0x7a, 0x84, 0x06, 0xd2, 0xc5, 0xe8, 0x49, 0x8a, 0x20, + 0x61, 0x95, 0xcf, 0xfc, 0x49, 0x01, 0x4e, 0x4b, 0x38, 0x11, 0x71, 0xa0, 0xeb, 0x5a, 0x68, 0xf1, + 0x6c, 0x22, 0xb4, 0x38, 0xa3, 0x31, 0x2b, 0xc1, 0x85, 0x0f, 0x0b, 0x7a, 0x18, 0x25, 0x83, 0x8c, + 0x1b, 0x99, 0x23, 0x36, 0x0d, 0x59, 0x78, 0x08, 0x3d, 0x5e, 0xc3, 0x09, 0x0d, 0x4c, 0xa7, 0x1e, + 0x2e, 0xc9, 0x54, 0xe0, 0x46, 0xe6, 0xc8, 0x2c, 0x45, 0xa7, 0x1e, 0x97, 0xe1, 0x84, 0x06, 0xa6, + 0xd3, 0x1e, 0xd1, 0xc0, 0x1b, 0x44, 0x3a, 0x0b, 0x99, 0x75, 0xae, 0x73, 0xc1, 0x14, 0x9d, 0xeb, + 0x1a, 0x22, 0x4e, 0x68, 0x40, 0x3f, 0x34, 0xe0, 0xfc, 0x07, 0xc4, 0xdd, 0x77, 0x5c, 0xba, 0xed, + 0x0c, 0x49, 0xdf, 0x71, 0xe3, 0x11, 0x0b, 0xdf, 0xfb, 0x1b, 0x19, 0xb4, 0xdf, 0xd1, 0x11, 0xf4, + 0x6e, 0x7c, 0xe5, 0x68, 0x5c, 0x3f, 0x7f, 0x27, 0x5d, 0x07, 0x9e, 0xa6, 0xdc, 0xfc, 0x6e, 0x51, + 0x5a, 0xbc, 0x7a, 0x32, 0xaa, 0x67, 0x89, 0xf1, 0x98, 0xb3, 0xc4, 0x87, 0x05, 0x7e, 0x2b, 0xec, + 0xd8, 0xf2, 0x62, 0x6c, 0x06, 0xab, 0xb9, 0xad, 0x09, 0x8a, 0x43, 0x99, 0xcf, 0xa6, 0x4e, 0xc0, + 0x09, 0x0d, 0xc8, 0x85, 0xd3, 0x02, 0x3c, 0x54, 0x99, 0xcf, 0x7c, 0xbf, 0x77, 0xdb, 0x09, 0xde, + 0x8e, 0xe4, 0x84, 0xc6, 0x33, 0x47, 0xe3, 0xfa, 0x69, 0xad, 0x1d, 0xeb, 0xf0, 0x68, 0x04, 0x4b, + 0x4a, 0x99, 0x91, 0x4f, 0x97, 0xb4, 0x99, 0xd7, 0x66, 0x2b, 0x6c, 0x0a, 0x85, 0x3c, 0x85, 0xdd, + 0x4a, 0x00, 0xe2, 0x09, 0x15, 0x72, 0x98, 0x7d, 0x2b, 0x1a, 0x66, 0x71, 0x96, 0x61, 0xb6, 0xac, + 0xf4, 0x61, 0xc6, 0xed, 0x58, 0x87, 0x47, 0xdf, 0x86, 0xa5, 0xdd, 0xc4, 0x65, 0xaa, 0x3c, 0xab, + 0x6f, 0x66, 0xca, 0x33, 0x52, 0xee, 0x61, 0xc5, 0x58, 0x93, 0x24, 0x3c, 0xa1, 0xc7, 0xfc, 0x71, + 0x01, 0xd0, 0xe4, 0x2d, 0x01, 0xba, 0xa6, 0xb9, 0xb2, 0xcb, 0x09, 0x57, 0xb6, 0xa4, 0x4a, 0x28, + 0x9e, 0xec, 0x21, 0xcc, 0x89, 0xfe, 0xce, 0x50, 0xbd, 0x90, 0x1d, 0x91, 0x60, 0x69, 0x46, 0x21, + 0x31, 0x59, 0x00, 0x2f, 0xed, 0x51, 0xda, 0xdd, 0x09, 0xe0, 0xd3, 0xac, 0x3c, 0x44, 0x45, 0x7b, + 0xf2, 0x20, 0x10, 0xb6, 0x20, 0x2d, 0xed, 0xfa, 0x89, 0x4a, 0xe8, 0xa2, 0xa8, 0xa0, 0xb4, 0x63, + 0x15, 0x5a, 0x4e, 0x54, 0xdf, 0xda, 0x95, 0xa6, 0xf5, 0x04, 0x13, 0xa5, 0x98, 0x95, 0xc4, 0x44, + 0x04, 0x2a, 0xd1, 0x3a, 0x4b, 0x43, 0x3a, 0x81, 0x82, 0x74, 0x0b, 0x8a, 0x91, 0xcd, 0x7f, 0x37, + 0x64, 0x90, 0xfe, 0xc0, 0xeb, 0x8f, 0x06, 0x04, 0x5d, 0x86, 0x82, 0x6b, 0x0d, 0x42, 0x9b, 0x89, + 0x6e, 0xff, 0xf8, 0xa3, 0x06, 0x4e, 0xe1, 0xb7, 0x7f, 0xfc, 0x4c, 0x98, 0x25, 0x8d, 0x8e, 0x35, + 0x24, 0x93, 0x4e, 0x59, 0xf8, 0x92, 0x98, 0xe8, 0x7d, 0x98, 0x1b, 0x78, 0x23, 0x37, 0x08, 0xcb, + 0x92, 0xaf, 0xcd, 0x86, 0x7e, 0x97, 0xc9, 0xc6, 0xe0, 0xfc, 0x27, 0xc5, 0x12, 0xd2, 0x7c, 0x07, + 0x96, 0x92, 0xbc, 0x68, 0x0d, 0x16, 0x3b, 0x84, 0x06, 0x8e, 0xcb, 0xe3, 0xd7, 0x6d, 0x2b, 0xd8, + 0x93, 0x63, 0x3f, 0x2f, 0x41, 0x16, 0x37, 0x74, 0x32, 0x4e, 0xf2, 0x9b, 0x7f, 0x99, 0x93, 0xc7, + 0x80, 0x3a, 0x42, 0xf4, 0xba, 0xb6, 0xfb, 0x9e, 0x4f, 0xec, 0xbe, 0x73, 0x13, 0x02, 0xca, 0x16, + 0xbc, 0x03, 0x73, 0x54, 0x2d, 0xfb, 0xbe, 0x90, 0x16, 0xe0, 0x8a, 0xd4, 0x55, 0x9b, 0x54, 0x1e, + 0xe3, 0xca, 0xbc, 0x59, 0x22, 0xa0, 0x07, 0xfc, 0xce, 0x43, 0x64, 0x9c, 0x72, 0xcb, 0xbd, 0x94, + 0x06, 0x17, 0xa5, 0xa8, 0x1a, 
0xe2, 0x69, 0x79, 0x35, 0x22, 0x48, 0x38, 0x86, 0x42, 0x6f, 0x41, + 0xde, 0xa6, 0xce, 0x71, 0x15, 0xc2, 0xf5, 0xf6, 0x96, 0x86, 0xc5, 0xab, 0x16, 0xeb, 0xed, 0x2d, + 0xcc, 0x04, 0xcd, 0xdf, 0x2b, 0x81, 0x92, 0xa5, 0xa2, 0xb7, 0x60, 0x81, 0x12, 0xff, 0xc0, 0xb1, + 0xc9, 0x9a, 0x6d, 0xb3, 0x85, 0x91, 0xf3, 0x16, 0x3d, 0x13, 0x68, 0x6b, 0x54, 0x9c, 0xe0, 0xe6, + 0x6f, 0x30, 0x54, 0xab, 0xcc, 0xfe, 0x06, 0xe3, 0x71, 0xf6, 0x18, 0x57, 0x73, 0xf3, 0x4f, 0xbb, + 0x9a, 0xfb, 0x2d, 0x28, 0x53, 0x3d, 0x8c, 0xfa, 0x5a, 0xf6, 0x08, 0x59, 0x46, 0x2e, 0xd1, 0x45, + 0x53, 0x14, 0xae, 0x44, 0x98, 0x6c, 0x52, 0x64, 0x7e, 0x53, 0x9c, 0x6d, 0x52, 0x1e, 0x93, 0xd9, + 0x7c, 0x03, 0x2a, 0x3e, 0x11, 0x13, 0x44, 0xa5, 0x6f, 0x4a, 0x2d, 0xf1, 0x60, 0xc9, 0x84, 0xc9, + 0x87, 0x23, 0xc7, 0x27, 0x03, 0xe2, 0x06, 0x34, 0x4e, 0xe0, 0x43, 0x2a, 0xc5, 0x31, 0x1a, 0xfa, + 0x00, 0x60, 0x18, 0xdd, 0x17, 0xc8, 0xf2, 0x51, 0xe6, 0xb4, 0x41, 0xbf, 0x69, 0x88, 0xf3, 0x95, + 0xb8, 0x1d, 0x2b, 0xe8, 0xe8, 0x7d, 0xb8, 0x10, 0x67, 0xc0, 0x1b, 0xc4, 0xea, 0xf0, 0xe0, 0x4e, + 0x5e, 0xca, 0x89, 0x6b, 0xaa, 0xaf, 0x1e, 0x8d, 0xeb, 0x17, 0xd6, 0xa7, 0x31, 0xe1, 0xe9, 0xf2, + 0x68, 0x00, 0xf3, 0xae, 0xd7, 0x21, 0x6d, 0xd2, 0x27, 0x76, 0xe0, 0xf9, 0x32, 0x55, 0xcd, 0x52, + 0x4a, 0x12, 0x45, 0x4f, 0xab, 0x7f, 0x4f, 0x11, 0x17, 0x85, 0x31, 0xb5, 0x05, 0x6b, 0xf0, 0xe8, + 0x0d, 0x58, 0xe0, 0x4e, 0x6e, 0xc7, 0x1f, 0xd1, 0x80, 0x74, 0xd6, 0xd7, 0x78, 0x4a, 0x5b, 0x16, + 0x67, 0xe5, 0x5d, 0x8d, 0x82, 0x13, 0x9c, 0xe6, 0x1f, 0x1a, 0x90, 0xf2, 0x3c, 0x4b, 0x33, 0x7d, + 0xe3, 0x69, 0x9b, 0xfe, 0x0b, 0x9a, 0x8b, 0x53, 0x2f, 0x70, 0x34, 0xf7, 0x65, 0xfe, 0x85, 0x01, + 0x67, 0xd3, 0x6a, 0x6b, 0xcc, 0x06, 0x63, 0xbf, 0x66, 0xcc, 0x58, 0x66, 0x54, 0x6f, 0x7d, 0xd3, + 0x5c, 0xdb, 0x82, 0xe2, 0xe2, 0x37, 0x1c, 0x5f, 0xf6, 0x31, 0xf2, 0x45, 0x1b, 0x1a, 0x15, 0x27, + 0xb8, 0xcd, 0xef, 0x17, 0x60, 0x39, 0x25, 0xd7, 0x41, 0x9b, 0xf2, 0x56, 0x65, 0x86, 0x0b, 0xc1, + 0xe8, 0x00, 0xd6, 0x6e, 0x56, 0x60, 0x38, 0xea, 0xf7, 0x9f, 0xec, 0x62, 0x30, 0x94, 0xc7, 0x0a, + 0x56, 0x78, 0x4d, 0x92, 0x3f, 0xc1, 0x35, 0xc9, 0x1d, 0x40, 0xe4, 0xe3, 0xa1, 0x47, 0x89, 0xcc, + 0x59, 0x3d, 0x1e, 0xb7, 0x14, 0xb8, 0x0d, 0x46, 0x4f, 0xaf, 0x36, 0x27, 0x38, 0x70, 0x8a, 0x14, + 0x5a, 0x85, 0x4a, 0xd7, 0xf3, 0x6d, 0xc2, 0x7a, 0xc9, 0x3d, 0x97, 0x52, 0xf5, 0xbb, 0x15, 0x12, + 0x70, 0xcc, 0x83, 0xde, 0x8b, 0xab, 0xc2, 0x73, 0x99, 0x2f, 0x33, 0xc5, 0x98, 0xb9, 0xa3, 0x98, + 0x5e, 0x0e, 0x5e, 0x83, 0x45, 0x2e, 0xb0, 0xb6, 0xbd, 0x15, 0xde, 0x37, 0x95, 0xf4, 0xe8, 0xa0, + 0xa9, 0x93, 0x71, 0x92, 0xdf, 0xfc, 0x51, 0x11, 0x96, 0x53, 0x32, 0xfc, 0xe8, 0x8e, 0xcd, 0x78, + 0x92, 0x3b, 0xb6, 0x2f, 0xca, 0x12, 0x5e, 0x82, 0x92, 0xeb, 0xad, 0x5b, 0xf6, 0x1e, 0x91, 0xef, + 0x19, 0xa2, 0x29, 0xba, 0x27, 0x9a, 0x71, 0x48, 0x0f, 0x8d, 0xa6, 0x70, 0x02, 0xa3, 0x99, 0x79, + 0xa1, 0xdf, 0x0a, 0xab, 0x2c, 0x5d, 0xa7, 0x4f, 0x78, 0xac, 0x36, 0x97, 0xd8, 0x99, 0x1a, 0x15, + 0x27, 0xb8, 0xd1, 0xd7, 0xa1, 0x22, 0x96, 0xc7, 0xef, 0xd1, 0x0c, 0xb7, 0x81, 0x51, 0x67, 0x9a, + 0xa1, 0x10, 0x8e, 0xe5, 0xd1, 0x10, 0xce, 0xf3, 0x74, 0x80, 0xf9, 0xeb, 0x81, 0xf3, 0x6d, 0x11, + 0x0f, 0x8a, 0x67, 0x57, 0xa2, 0xce, 0x79, 0xe3, 0x68, 0x5c, 0x3f, 0xbf, 0x95, 0xce, 0xf2, 0x68, + 0x3a, 0x09, 0x4f, 0x83, 0x45, 0xdf, 0x80, 0xd2, 0x01, 0x8f, 0xa8, 0xc2, 0x9b, 0x89, 0xc6, 0x6c, + 0xd1, 0x71, 0xbc, 0x8a, 0xe2, 0x37, 0xc5, 0x21, 0x9e, 0xf9, 0x7d, 0x03, 0xd2, 0xaf, 0x07, 0xf5, + 0x39, 0x33, 0x9e, 0x70, 0xce, 0x9e, 0x8f, 0xed, 0x4a, 0x94, 0xf3, 0xab, 0x69, 0x36, 0x65, 0xfe, + 0x91, 0x01, 0xcb, 0x29, 0xf5, 0x8d, 0x5f, 0x8e, 0x23, 
0xe9, 0xb3, 0x5c, 0xb2, 0x73, 0x9b, 0x07, + 0xc4, 0x0d, 0x4e, 0x76, 0x29, 0xb9, 0x29, 0xae, 0x02, 0x73, 0xb2, 0xaa, 0x9f, 0xa9, 0x38, 0xc1, + 0xeb, 0xc3, 0xfa, 0x1d, 0xe0, 0x13, 0x78, 0xee, 0xe9, 0x77, 0xce, 0x85, 0x2f, 0xfb, 0xce, 0xd9, + 0xfc, 0x2b, 0x03, 0x16, 0xf4, 0xbb, 0x4e, 0xf4, 0x55, 0xc8, 0x8f, 0x7c, 0x47, 0x4e, 0x6a, 0xd4, + 0xfb, 0x77, 0xf0, 0x16, 0x66, 0xed, 0x8c, 0xec, 0x93, 0xae, 0x5c, 0xb1, 0x88, 0x8c, 0x49, 0x17, + 0xb3, 0x76, 0x44, 0xa0, 0x3a, 0xf4, 0xbd, 0x8f, 0x0f, 0xc5, 0x39, 0x3f, 0xc3, 0xfb, 0xec, 0xed, + 0x58, 0x2a, 0x2e, 0x23, 0x2b, 0x8d, 0x58, 0xc5, 0xe5, 0x11, 0xd4, 0x64, 0x71, 0xec, 0x97, 0xc3, + 0x5c, 0xff, 0x2e, 0x07, 0x25, 0x69, 0x34, 0xe8, 0x43, 0x58, 0xe8, 0x69, 0xd3, 0x3b, 0x43, 0xb7, + 0x12, 0x77, 0xd0, 0x91, 0xcb, 0xd5, 0xdb, 0x71, 0x42, 0x01, 0xfa, 0x6d, 0x38, 0xd3, 0x73, 0x02, + 0x7d, 0x4c, 0x33, 0x54, 0x0e, 0x6e, 0x27, 0x65, 0x9b, 0x17, 0xa4, 0xe2, 0x33, 0x13, 0x24, 0x3c, + 0xa9, 0x09, 0xdd, 0x87, 0x82, 0x4f, 0xba, 0xb3, 0x3c, 0x72, 0x62, 0x7b, 0x8a, 0x74, 0xf9, 0x1e, + 0x8b, 0xa2, 0x2f, 0x4c, 0xba, 0x14, 0x73, 0x20, 0xf3, 0x77, 0xc5, 0x52, 0x27, 0x0a, 0x84, 0xff, + 0x13, 0x9f, 0x4c, 0xfc, 0x97, 0x01, 0x10, 0x77, 0xf6, 0xff, 0xdf, 0xda, 0x9a, 0x7f, 0x9e, 0x83, + 0x49, 0x46, 0xb6, 0x2f, 0x6c, 0x91, 0x3d, 0x1a, 0xa9, 0x9f, 0x29, 0x49, 0x2a, 0x7a, 0x08, 0x73, + 0x16, 0xff, 0xce, 0x67, 0x86, 0x1e, 0x0b, 0x55, 0xeb, 0x9e, 0x1b, 0xf8, 0x5e, 0xff, 0x1d, 0x4a, + 0x7c, 0xe5, 0xe3, 0x1a, 0x8e, 0x85, 0x25, 0x26, 0x22, 0x2c, 0x3d, 0x91, 0xdf, 0xea, 0xcc, 0xf0, + 0x4c, 0x7e, 0x52, 0x81, 0x92, 0xaa, 0x48, 0x38, 0x1c, 0x23, 0xcf, 0x70, 0x6f, 0x6d, 0x7e, 0xcf, + 0x80, 0xa5, 0x64, 0x35, 0x9d, 0xc9, 0xf3, 0x60, 0x63, 0x6b, 0x23, 0x79, 0x57, 0xb1, 0x25, 0x9a, + 0x71, 0x48, 0x47, 0x77, 0xa0, 0xc4, 0x82, 0x4e, 0x2c, 0xbd, 0x6d, 0xc6, 0x90, 0x95, 0x9f, 0xef, + 0xb7, 0x84, 0x1c, 0x0e, 0x01, 0xcc, 0x7f, 0x30, 0x00, 0x4d, 0xd6, 0x5b, 0xd1, 0x36, 0x9c, 0x15, + 0x5f, 0x62, 0xc8, 0x47, 0x04, 0x5b, 0x5a, 0xd7, 0x2e, 0xc9, 0xae, 0x9d, 0x6d, 0xa5, 0xf0, 0xe0, + 0x54, 0xc9, 0x28, 0xc8, 0xce, 0x9d, 0x3c, 0xc8, 0x7e, 0x01, 0xe6, 0x86, 0x6c, 0xae, 0x3a, 0x32, + 0x12, 0x8e, 0x56, 0x7c, 0x9b, 0xb7, 0x62, 0x49, 0x35, 0xff, 0x3a, 0x07, 0xb5, 0x69, 0xcf, 0xb0, + 0xbf, 0x80, 0x91, 0x3d, 0xd4, 0x46, 0xf6, 0x46, 0xe6, 0x37, 0x3f, 0x81, 0x4f, 0xac, 0xc1, 0x8e, + 0xd5, 0x3b, 0x3e, 0xc7, 0x1c, 0xc0, 0xa2, 0xa2, 0xf5, 0x84, 0x9f, 0xdc, 0x44, 0x39, 0x52, 0x4b, + 0x87, 0xc2, 0x49, 0x6c, 0xb3, 0x0d, 0x10, 0xbf, 0x23, 0xcd, 0x50, 0x83, 0x7e, 0x0e, 0x8a, 0x07, + 0x56, 0x7f, 0x14, 0x7e, 0xb9, 0x18, 0xbd, 0x06, 0x7f, 0xc0, 0x1a, 0xb1, 0xa0, 0x99, 0x7f, 0x9c, + 0x83, 0xaa, 0xf2, 0xce, 0xe9, 0x69, 0xa5, 0xdf, 0xcf, 0x40, 0xce, 0xa2, 0x3c, 0xdd, 0xa9, 0x88, + 0x8b, 0xe9, 0x35, 0x8a, 0x73, 0x16, 0x45, 0xef, 0x42, 0x71, 0x68, 0x05, 0x7b, 0xe1, 0x5b, 0xf6, + 0xab, 0xb3, 0xbd, 0xc2, 0x62, 0xe9, 0x49, 0x3c, 0x0e, 0xf6, 0x8b, 0x62, 0x81, 0x97, 0xc8, 0xf2, + 0xf2, 0x4f, 0x2f, 0xcb, 0x33, 0xbf, 0x6b, 0xc0, 0x62, 0xa2, 0x0f, 0xe8, 0x2a, 0x00, 0x8d, 0x7e, + 0xc9, 0x25, 0x88, 0x0a, 0x69, 0x31, 0x1f, 0x56, 0xb8, 0x9e, 0xb8, 0x60, 0xd2, 0x87, 0xf3, 0x53, + 0x8c, 0x93, 0xa5, 0x88, 0x6c, 0xc5, 0xe9, 0xd0, 0xb2, 0x49, 0xf2, 0xc9, 0xfe, 0xbd, 0x90, 0x80, + 0x63, 0x9e, 0xc8, 0x78, 0x72, 0xd3, 0x8c, 0xc7, 0xfc, 0x47, 0x03, 0x2e, 0x1d, 0x77, 0x19, 0xcc, + 0x92, 0x7e, 0x79, 0xe3, 0x1b, 0xa5, 0x99, 0x89, 0x2b, 0x81, 0x3b, 0x3a, 0x19, 0x27, 0xf9, 0xd1, + 0x75, 0xa8, 0x2a, 0x4d, 0xb2, 0x33, 0x51, 0x1c, 0xa9, 0x88, 0x63, 0x95, 0xef, 0x09, 0xc2, 0x78, + 0xf3, 0x6f, 0x0d, 0x38, 0x9b, 0x56, 0x39, 0x44, 0xbd, 0xf0, 0x1b, 0x0b, 0x91, 
0xbb, 0x35, 0x4f, + 0x58, 0x81, 0x6c, 0xf0, 0x2f, 0x2d, 0x36, 0xdd, 0xc0, 0x3f, 0x4c, 0xff, 0xfa, 0xe2, 0xe2, 0x4d, + 0x80, 0x98, 0x07, 0x2d, 0x41, 0x7e, 0x9f, 0x1c, 0x8a, 0x89, 0xc3, 0xec, 0x4f, 0x74, 0x56, 0xdb, + 0xb4, 0x72, 0x97, 0xbe, 0x91, 0xbb, 0x69, 0xbc, 0x51, 0xfe, 0x83, 0x3f, 0xa9, 0x9f, 0xfa, 0xce, + 0x2f, 0x2e, 0x9f, 0x32, 0x7f, 0x60, 0x80, 0x1a, 0x65, 0xa3, 0x97, 0xa1, 0xb2, 0x17, 0x04, 0x43, + 0xde, 0x24, 0x9f, 0x74, 0xf1, 0x2b, 0x89, 0xb7, 0x77, 0x76, 0xb6, 0x79, 0x23, 0x8e, 0xe9, 0xa8, + 0x01, 0xc0, 0x7e, 0x50, 0xc1, 0x5d, 0x88, 0x9f, 0x61, 0x32, 0xee, 0xb6, 0x60, 0x57, 0x38, 0x44, + 0x32, 0x2a, 0x98, 0xc5, 0xa7, 0x7b, 0x32, 0x19, 0x15, 0x9c, 0x21, 0xcd, 0xfc, 0x33, 0x03, 0xce, + 0x4c, 0x3c, 0x21, 0x44, 0xdb, 0x51, 0xf8, 0x3d, 0x6b, 0xf1, 0x71, 0x4a, 0xa0, 0xfe, 0xc4, 0xbb, + 0xe8, 0x26, 0x9c, 0x15, 0x88, 0x5c, 0x6b, 0xbc, 0x85, 0x1e, 0xeb, 0x4e, 0xcd, 0x3f, 0x35, 0x00, + 0xe2, 0x72, 0x18, 0xda, 0x85, 0x79, 0xd1, 0x25, 0x2d, 0x8e, 0xcc, 0x3e, 0xc0, 0xb3, 0x52, 0xc5, + 0x7c, 0x5b, 0x41, 0xc1, 0x1a, 0x26, 0xdb, 0xd7, 0xbc, 0x0a, 0xcd, 0x77, 0x57, 0x4e, 0xdf, 0xd7, + 0x77, 0x43, 0x02, 0x8e, 0x79, 0xcc, 0x9f, 0xe7, 0x61, 0x39, 0xe5, 0xd1, 0xca, 0xff, 0xe9, 0xa2, + 0xea, 0x4b, 0x50, 0x12, 0xdf, 0x31, 0xd0, 0x64, 0x74, 0x27, 0x3e, 0x73, 0xa0, 0x38, 0xa4, 0xa3, + 0x2b, 0x50, 0x75, 0x5c, 0x5b, 0xdc, 0xb1, 0x58, 0x61, 0x31, 0x4d, 0xdc, 0x5f, 0xc7, 0xcd, 0x58, + 0xe5, 0xd1, 0xab, 0x6f, 0x73, 0x19, 0xaa, 0x6f, 0x5f, 0x60, 0xf9, 0xe9, 0x9b, 0x70, 0x66, 0x22, + 0xf4, 0xcd, 0x16, 0x07, 0x10, 0xfe, 0xf9, 0x7c, 0x22, 0x0e, 0x10, 0x5f, 0xcd, 0x0b, 0x9a, 0xf9, + 0x43, 0x03, 0x16, 0x12, 0x39, 0xc2, 0x89, 0x4a, 0x35, 0xf7, 0xd5, 0x52, 0xcd, 0xc9, 0xf2, 0x1b, + 0xad, 0x68, 0x63, 0xde, 0x81, 0xf4, 0x57, 0xf0, 0xc9, 0xc5, 0x34, 0x1e, 0xbf, 0x98, 0xe6, 0x4f, + 0x72, 0x50, 0x89, 0x1e, 0x0f, 0xa2, 0x57, 0xb5, 0x99, 0xbb, 0xa0, 0xce, 0xdc, 0xa3, 0x71, 0x5d, + 0x30, 0x2a, 0xd3, 0xf8, 0x3e, 0x54, 0xa2, 0xc7, 0xa7, 0x51, 0x29, 0x2a, 0x7b, 0x9c, 0x17, 0x59, + 0x4d, 0xf4, 0xa2, 0x15, 0xc7, 0x78, 0x2c, 0xf4, 0x0d, 0x5f, 0x87, 0xde, 0x75, 0xfa, 0x7d, 0x87, + 0xca, 0x0b, 0xb6, 0x3c, 0xbf, 0x60, 0x8b, 0x42, 0xdf, 0x8d, 0x14, 0x1e, 0x9c, 0x2a, 0x89, 0xb6, + 0xa1, 0x48, 0x03, 0x32, 0xa4, 0xb2, 0xe6, 0xfc, 0x72, 0xa6, 0x77, 0x95, 0x64, 0xc8, 0x53, 0xfa, + 0xc8, 0x44, 0x58, 0x0b, 0xc5, 0x02, 0xc8, 0xfc, 0x37, 0x03, 0xca, 0x21, 0x0b, 0x7a, 0x45, 0x9b, + 0xbc, 0x5a, 0x62, 0xf2, 0x38, 0xdf, 0xff, 0xda, 0xb9, 0x33, 0xc7, 0x06, 0x2c, 0xe8, 0x6f, 0x44, + 0x94, 0x42, 0x92, 0x71, 0x5c, 0x21, 0x09, 0xbd, 0x02, 0x65, 0xab, 0xdf, 0xf7, 0x3e, 0xda, 0x74, + 0x0f, 0x64, 0xf1, 0x36, 0xba, 0x7b, 0x5e, 0x93, 0xed, 0x38, 0xe2, 0x40, 0x07, 0xb0, 0x28, 0xe4, + 0xe2, 0xd7, 0xbf, 0xf9, 0xcc, 0x57, 0xa0, 0x69, 0xe7, 0x58, 0x73, 0x99, 0x45, 0x5e, 0x6d, 0x1d, + 0x13, 0x27, 0x95, 0x34, 0x6f, 0x7f, 0xfa, 0xf9, 0xca, 0xa9, 0x9f, 0x7d, 0xbe, 0x72, 0xea, 0xb3, + 0xcf, 0x57, 0x4e, 0x7d, 0xe7, 0x68, 0xc5, 0xf8, 0xf4, 0x68, 0xc5, 0xf8, 0xd9, 0xd1, 0x8a, 0xf1, + 0xd9, 0xd1, 0x8a, 0xf1, 0x2f, 0x47, 0x2b, 0xc6, 0xef, 0xff, 0xeb, 0xca, 0xa9, 0x6f, 0x3e, 0xfb, + 0xd8, 0x7f, 0x49, 0xf3, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x88, 0x7d, 0x3c, 0xce, 0xb6, 0x46, + 0x00, 0x00, +} + +func (m *BinaryBuildRequestOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BinaryBuildRequestOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + 
+func (m *BinaryBuildRequestOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.CommitterEmail)
+	copy(dAtA[i:], m.CommitterEmail)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.CommitterEmail)))
+	i--
+	dAtA[i] = 0x42
+	i -= len(m.CommitterName)
+	copy(dAtA[i:], m.CommitterName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.CommitterName)))
+	i--
+	dAtA[i] = 0x3a
+	i -= len(m.AuthorEmail)
+	copy(dAtA[i:], m.AuthorEmail)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.AuthorEmail)))
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.AuthorName)
+	copy(dAtA[i:], m.AuthorName)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.AuthorName)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Commit)
+	copy(dAtA[i:], m.Commit)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Commit)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.AsFile)
+	copy(dAtA[i:], m.AsFile)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.AsFile)))
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BinaryBuildSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BinaryBuildSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BinaryBuildSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.AsFile)
+	copy(dAtA[i:], m.AsFile)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.AsFile)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BitbucketWebHookCause) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BitbucketWebHookCause) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BitbucketWebHookCause) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.CommonWebHookCause.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Build) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Build) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Build) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildCondition) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildCondition) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x32
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x2a
+	i -= len(m.Reason)
+	copy(dAtA[i:], m.Reason)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+	i--
+	dAtA[i] = 0x22
+	{
+		size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Status)
+	copy(dAtA[i:], m.Status)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status)))
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildConfig) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildConfig) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	{
+		size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	{
+		size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildConfigList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildConfigList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildConfigList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildConfigSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildConfigSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildConfigSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.FailedBuildsHistoryLimit != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedBuildsHistoryLimit))
+		i--
+		dAtA[i] = 0x28
+	}
+	if m.SuccessfulBuildsHistoryLimit != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulBuildsHistoryLimit))
+		i--
+		dAtA[i] = 0x20
+	}
+	{
+		size, err := m.CommonSpec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.RunPolicy)
+	copy(dAtA[i:], m.RunPolicy)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.RunPolicy)))
+	i--
+	dAtA[i] = 0x12
+	if len(m.Triggers) > 0 {
+		for iNdEx := len(m.Triggers) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Triggers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildConfigStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildConfigStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildConfigStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.ImageChangeTriggers) > 0 {
+		for iNdEx := len(m.ImageChangeTriggers) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.ImageChangeTriggers[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	i = encodeVarintGenerated(dAtA, i, uint64(m.LastVersion))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildList) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildList) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Items) > 0 {
+		for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildLog) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildLog) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildLog) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildLogOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildLogOptions) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildLogOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i--
+	if m.InsecureSkipTLSVerifyBackend {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x58
+	if m.Version != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.Version))
+		i--
+		dAtA[i] = 0x50
+	}
+	i--
+	if m.NoWait {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x48
+	if m.LimitBytes != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes))
+		i--
+		dAtA[i] = 0x40
+	}
+	if m.TailLines != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.TailLines))
+		i--
+		dAtA[i] = 0x38
+	}
+	i--
+	if m.Timestamps {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x30
+	if m.SinceTime != nil {
+		{
+			size, err := m.SinceTime.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.SinceSeconds != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.SinceSeconds))
+		i--
+		dAtA[i] = 0x20
+	}
+	i--
+	if m.Previous {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x18
+	i--
+	if m.Follow {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x10
+	i -= len(m.Container)
+	copy(dAtA[i:], m.Container)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildOutput) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildOutput) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildOutput) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.ImageLabels) > 0 {
+		for iNdEx := len(m.ImageLabels) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.ImageLabels[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if m.PushSecret != nil {
+		{
+			size, err := m.PushSecret.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.To != nil {
+		{
+			size, err := m.To.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildPostCommitSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildPostCommitSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildPostCommitSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.Script)
+	copy(dAtA[i:], m.Script)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Script)))
+	i--
+	dAtA[i] = 0x1a
+	if len(m.Args) > 0 {
+		for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Args[iNdEx])
+			copy(dAtA[i:], m.Args[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Args[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Command) > 0 {
+		for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Command[iNdEx])
+			copy(dAtA[i:], m.Command[iNdEx])
+			i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.SourceStrategyOptions != nil {
+		{
+			size, err := m.SourceStrategyOptions.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x52
+	}
+	if m.DockerStrategyOptions != nil {
+		{
+			size, err := m.DockerStrategyOptions.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4a
+	}
+	if len(m.TriggeredBy) > 0 {
+		for iNdEx := len(m.TriggeredBy) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.TriggeredBy[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x42
+		}
+	}
+	if len(m.Env) > 0 {
+		for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x3a
+		}
+	}
+	if m.LastVersion != nil {
+		i = encodeVarintGenerated(dAtA, i, uint64(*m.LastVersion))
+		i--
+		dAtA[i] = 0x30
+	}
+	if m.Binary != nil {
+		{
+			size, err := m.Binary.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.From != nil {
+		{
+			size, err := m.From.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.TriggeredByImage != nil {
+		{
+			size, err := m.TriggeredByImage.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.Revision != nil {
+		{
+			size, err := m.Revision.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	{
+		size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildSource) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildSource) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildSource) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.ConfigMaps) > 0 {
+		for iNdEx := len(m.ConfigMaps) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.ConfigMaps[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x4a
+		}
+	}
+	if len(m.Secrets) > 0 {
+		for iNdEx := len(m.Secrets) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Secrets[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x42
+		}
+	}
+	if m.SourceSecret != nil {
+		{
+			size, err := m.SourceSecret.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3a
+	}
+	i -= len(m.ContextDir)
+	copy(dAtA[i:], m.ContextDir)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContextDir)))
+	i--
+	dAtA[i] = 0x32
+	if len(m.Images) > 0 {
+		for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x2a
+		}
+	}
+	if m.Git != nil {
+		{
+			size, err := m.Git.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.Dockerfile != nil {
+		i -= len(*m.Dockerfile)
+		copy(dAtA[i:], *m.Dockerfile)
+		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Dockerfile)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.Binary != nil {
+		{
+			size, err := m.Binary.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildSpec) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.TriggeredBy) > 0 {
+		for iNdEx := len(m.TriggeredBy) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.TriggeredBy[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	{
+		size, err := m.CommonSpec.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Conditions) > 0 {
+		for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x6a
+		}
+	}
+	i -= len(m.LogSnippet)
+	copy(dAtA[i:], m.LogSnippet)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.LogSnippet)))
+	i--
+	dAtA[i] = 0x62
+	if len(m.Stages) > 0 {
+		for iNdEx := len(m.Stages) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Stages[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x5a
+		}
+	}
+	{
+		size, err := m.Output.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x52
+	if m.Config != nil {
+		{
+			size, err := m.Config.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4a
+	}
+	i -= len(m.OutputDockerImageReference)
+	copy(dAtA[i:], m.OutputDockerImageReference)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.OutputDockerImageReference)))
+	i--
+	dAtA[i] = 0x42
+	i = encodeVarintGenerated(dAtA, i, uint64(m.Duration))
+	i--
+	dAtA[i] = 0x38
+	if m.CompletionTimestamp != nil {
+		{
+			size, err := m.CompletionTimestamp.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x32
+	}
+	if m.StartTimestamp != nil {
+		{
+			size, err := m.StartTimestamp.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	i -= len(m.Message)
+	copy(dAtA[i:], m.Message)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Reason)
+	copy(dAtA[i:], m.Reason)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason)))
+	i--
+	dAtA[i] = 0x1a
+	i--
+	if m.Cancelled {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x10
+	i -= len(m.Phase)
+	copy(dAtA[i:], m.Phase)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildStatusOutput) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildStatusOutput) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildStatusOutput) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.To != nil {
+		{
+			size, err := m.To.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildStatusOutputTo) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildStatusOutputTo) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildStatusOutputTo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i -= len(m.ImageDigest)
+	copy(dAtA[i:], m.ImageDigest)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageDigest)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildStrategy) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildStrategy) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.JenkinsPipelineStrategy != nil {
+		{
+			size, err := m.JenkinsPipelineStrategy.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.CustomStrategy != nil {
+		{
+			size, err := m.CustomStrategy.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.SourceStrategy != nil {
+		{
+			size, err := m.SourceStrategy.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.DockerStrategy != nil {
+		{
+			size, err := m.DockerStrategy.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	i -= len(m.Type)
+	copy(dAtA[i:], m.Type)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *BuildTriggerCause) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BuildTriggerCause) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BuildTriggerCause) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.BitbucketWebHook != nil {
+		{
+			size, err := m.BitbucketWebHook.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x32
+	}
+	if m.GitLabWebHook != nil {
+		{
+			size, err := m.GitLabWebHook.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.ImageChangeBuild != nil {
+		{
+			size, err := m.ImageChangeBuild.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
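+	// Fields are written back-to-front, so they appear here in descending
+	// field-number order. Each one-byte key encodes
+	// (field_number << 3) | wire_type: 0x22 above is field 4 with wire
+	// type 2 (length-delimited), 0x1a below is field 3, 0x12 is field 2.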
if m.GitHubWebHook != nil { + { + size, err := m.GitHubWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.GenericWebHook != nil { + { + size, err := m.GenericWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildTriggerPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildTriggerPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildTriggerPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BitbucketWebHook != nil { + { + size, err := m.BitbucketWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.GitLabWebHook != nil { + { + size, err := m.GitLabWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.ImageChange != nil { + { + size, err := m.ImageChange.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.GenericWebHook != nil { + { + size, err := m.GenericWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.GitHubWebHook != nil { + { + size, err := m.GitHubWebHook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildVolume) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildVolume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Mounts) > 0 { + for iNdEx := len(m.Mounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Mounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildVolumeMount) Marshal() (dAtA 
[]byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildVolumeMount) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildVolumeMount) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.DestinationPath) + copy(dAtA[i:], m.DestinationPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DestinationPath))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BuildVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BuildVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BuildVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CSI != nil { + { + size, err := m.CSI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.ConfigMap != nil { + { + size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Secret != nil { + { + size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CommonSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommonSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommonSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MountTrustedCA != nil { + i-- + if *m.MountTrustedCA { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + } + if m.NodeSelector != nil { + { + size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.CompletionDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CompletionDeadlineSeconds)) + i-- + dAtA[i] = 0x40 + } + { + size, err := m.PostCommit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + { + size, err := m.Output.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + { + size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } 
+ i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if m.Revision != nil { + { + size, err := m.Revision.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.ServiceAccount) + copy(dAtA[i:], m.ServiceAccount) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccount))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CommonWebHookCause) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CommonWebHookCause) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CommonWebHookCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Secret) + copy(dAtA[i:], m.Secret) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Secret))) + i-- + dAtA[i] = 0x12 + if m.Revision != nil { + { + size, err := m.Revision.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ConfigMapBuildSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigMapBuildSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapBuildSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.DestinationDir) + copy(dAtA[i:], m.DestinationDir) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DestinationDir))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CustomBuildStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CustomBuildStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CustomBuildStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.BuildAPIVersion) + copy(dAtA[i:], m.BuildAPIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BuildAPIVersion))) + i-- + dAtA[i] = 0x3a + if len(m.Secrets) > 0 { + for iNdEx := len(m.Secrets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Secrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i-- + if m.ForcePull { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + i-- + if m.ExposeDockerSocket { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 
0x20 + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.PullSecret != nil { + { + size, err := m.PullSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DockerBuildStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DockerBuildStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DockerBuildStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + if m.ImageOptimizationPolicy != nil { + i -= len(*m.ImageOptimizationPolicy) + copy(dAtA[i:], *m.ImageOptimizationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ImageOptimizationPolicy))) + i-- + dAtA[i] = 0x42 + } + if len(m.BuildArgs) > 0 { + for iNdEx := len(m.BuildArgs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.BuildArgs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + i -= len(m.DockerfilePath) + copy(dAtA[i:], m.DockerfilePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerfilePath))) + i-- + dAtA[i] = 0x32 + i-- + if m.ForcePull { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i-- + if m.NoCache { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + if m.PullSecret != nil { + { + size, err := m.PullSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.From != nil { + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DockerStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DockerStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DockerStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) + _ = i + var l int + _ = l + if m.NoCache != nil { + i-- + if *m.NoCache { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.BuildArgs) > 0 { + for iNdEx := len(m.BuildArgs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.BuildArgs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *GenericWebHookCause) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenericWebHookCause) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenericWebHookCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Secret) + copy(dAtA[i:], m.Secret) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Secret))) + i-- + dAtA[i] = 0x12 + if m.Revision != nil { + { + size, err := m.Revision.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GenericWebHookEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenericWebHookEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenericWebHookEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DockerStrategyOptions != nil { + { + size, err := m.DockerStrategyOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Git != nil { + { + size, err := m.Git.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GitBuildSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GitBuildSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GitBuildSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ProxyConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Ref) + copy(dAtA[i:], m.Ref) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Ref))) + i-- + dAtA[i] = 0x12 + i -= len(m.URI) + 
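+	// Reverse-order encoding: i is first moved left to reserve room for the
+	// payload, the bytes are copied in, and only then are the varint length
+	// prefix and the one-byte field tag written immediately before it, so
+	// every field's length is known before its prefix is emitted.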
copy(dAtA[i:], m.URI) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.URI))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GitHubWebHookCause) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GitHubWebHookCause) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GitHubWebHookCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Secret) + copy(dAtA[i:], m.Secret) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Secret))) + i-- + dAtA[i] = 0x12 + if m.Revision != nil { + { + size, err := m.Revision.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GitInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GitInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GitInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Refs) > 0 { + for iNdEx := len(m.Refs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Refs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.GitSourceRevision.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.GitBuildSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GitLabWebHookCause) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GitLabWebHookCause) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GitLabWebHookCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.CommonWebHookCause.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GitRefInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GitRefInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GitRefInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.GitSourceRevision.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + 
{ + size, err := m.GitBuildSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GitSourceRevision) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GitSourceRevision) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GitSourceRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x22 + { + size, err := m.Committer.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Author.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Commit) + copy(dAtA[i:], m.Commit) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Commit))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageChangeCause) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageChangeCause) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageChangeCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.FromRef != nil { + { + size, err := m.FromRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.ImageID) + copy(dAtA[i:], m.ImageID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageChangeTrigger) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageChangeTrigger) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageChangeTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + if m.From != nil { + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.LastTriggeredImageID) + copy(dAtA[i:], m.LastTriggeredImageID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LastTriggeredImageID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageChangeTriggerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageChangeTriggerStatus) MarshalTo(dAtA []byte) (int, 
error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageChangeTriggerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.LastTriggerTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.LastTriggeredImageID) + copy(dAtA[i:], m.LastTriggeredImageID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LastTriggeredImageID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageLabel) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageLabel) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageLabel) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.As) > 0 { + for iNdEx := len(m.As) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.As[iNdEx]) + copy(dAtA[i:], m.As[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.As[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if m.PullSecret != nil { + { + size, err := m.PullSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Paths) > 0 { + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Paths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageSourcePath) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageSourcePath) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageSourcePath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.DestinationDir) + copy(dAtA[i:], m.DestinationDir) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.DestinationDir))) + i-- + dAtA[i] = 0x12 + i -= len(m.SourcePath) + copy(dAtA[i:], m.SourcePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SourcePath))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageStreamTagReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageStreamTagReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageStreamTagReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *JenkinsPipelineBuildStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JenkinsPipelineBuildStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JenkinsPipelineBuildStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Jenkinsfile) + copy(dAtA[i:], m.Jenkinsfile) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Jenkinsfile))) + i-- + dAtA[i] = 0x12 + i -= len(m.JenkinsfilePath) + copy(dAtA[i:], m.JenkinsfilePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.JenkinsfilePath))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m OptionalNodeSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m OptionalNodeSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m OptionalNodeSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m) > 0 { + keysForItems := make([]string, 0, len(m)) + for k := range m { + keysForItems = append(keysForItems, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForItems) + for iNdEx := len(keysForItems) - 1; iNdEx >= 0; iNdEx-- { + v := m[string(keysForItems[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForItems[iNdEx]) + copy(dAtA[i:], keysForItems[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForItems[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ProxyConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + 
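+	// Marshal allocates exactly Size() bytes and fills them from the end;
+	// n is the byte count reported by MarshalToSizedBuffer, so dAtA[:n]
+	// is the fully encoded message.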
return dAtA[:n], nil +} + +func (m *ProxyConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProxyConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NoProxy != nil { + i -= len(*m.NoProxy) + copy(dAtA[i:], *m.NoProxy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NoProxy))) + i-- + dAtA[i] = 0x2a + } + if m.HTTPSProxy != nil { + i -= len(*m.HTTPSProxy) + copy(dAtA[i:], *m.HTTPSProxy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HTTPSProxy))) + i-- + dAtA[i] = 0x22 + } + if m.HTTPProxy != nil { + i -= len(*m.HTTPProxy) + copy(dAtA[i:], *m.HTTPProxy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HTTPProxy))) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} + +func (m *SecretBuildSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretBuildSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretBuildSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.DestinationDir) + copy(dAtA[i:], m.DestinationDir) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DestinationDir))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SecretLocalReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretLocalReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretLocalReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SecretSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.MountPath) + copy(dAtA[i:], m.MountPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountPath))) + i-- + dAtA[i] = 0x12 + { + size, err := m.SecretSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SourceBuildStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceBuildStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*SourceBuildStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + i-- + if m.ForcePull { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + if m.Incremental != nil { + i-- + if *m.Incremental { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + i -= len(m.Scripts) + copy(dAtA[i:], m.Scripts) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scripts))) + i-- + dAtA[i] = 0x22 + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.PullSecret != nil { + { + size, err := m.PullSecret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SourceControlUser) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceControlUser) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SourceControlUser) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Email) + copy(dAtA[i:], m.Email) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Email))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SourceRevision) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceRevision) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SourceRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Git != nil { + { + size, err := m.Git.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SourceStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SourceStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) 
+	_ = i
+	var l int
+	_ = l
+	if m.Incremental != nil {
+		i--
+		if *m.Incremental {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *StageInfo) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *StageInfo) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StageInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if len(m.Steps) > 0 {
+		for iNdEx := len(m.Steps) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Steps[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintGenerated(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	i = encodeVarintGenerated(dAtA, i, uint64(m.DurationMilliseconds))
+	i--
+	dAtA[i] = 0x18
+	{
+		size, err := m.StartTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *StepInfo) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *StepInfo) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StepInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	i = encodeVarintGenerated(dAtA, i, uint64(m.DurationMilliseconds))
+	i--
+	dAtA[i] = 0x18
+	{
+		size, err := m.StartTime.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintGenerated(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	i -= len(m.Name)
+	copy(dAtA[i:], m.Name)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *WebHookTrigger) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WebHookTrigger) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WebHookTrigger) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.SecretReference != nil {
+		{
+			size, err := m.SecretReference.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintGenerated(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	i--
+	if m.AllowEnv {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x10
+	i -= len(m.Secret)
+	copy(dAtA[i:], m.Secret)
+	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Secret)))
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
+	offset -= sovGenerated(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *BinaryBuildRequestOptions) Size() (n int) {
+	if m == nil {
+		return 0
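+		// encodeVarintGenerated above emits v as a protobuf base-128 varint:
+		// seven payload bits per byte, low bits first, with the high bit set
+		// on every byte except the last; 300 encodes as 0xac 0x02, for
+		// example. Its companion sovGenerated reports how many bytes that
+		// takes, which keeps the Size() methods below in sync with Marshal.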
} + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.AsFile) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Commit) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.AuthorName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.AuthorEmail) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.CommitterName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.CommitterEmail) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BinaryBuildSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.AsFile) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BitbucketWebHookCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.CommonWebHookCause.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Build) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BuildCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BuildConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BuildConfigList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildConfigSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Triggers) > 0 { + for _, e := range m.Triggers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RunPolicy) + n += 1 + l + sovGenerated(uint64(l)) + l = m.CommonSpec.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.SuccessfulBuildsHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.SuccessfulBuildsHistoryLimit)) + } + if m.FailedBuildsHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.FailedBuildsHistoryLimit)) + } + return n +} + +func (m *BuildConfigStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.LastVersion)) + if len(m.ImageChangeTriggers) > 0 { + for _, e := range m.ImageChangeTriggers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildLog) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m 
*BuildLogOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + if m.SinceSeconds != nil { + n += 1 + sovGenerated(uint64(*m.SinceSeconds)) + } + if m.SinceTime != nil { + l = m.SinceTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if m.TailLines != nil { + n += 1 + sovGenerated(uint64(*m.TailLines)) + } + if m.LimitBytes != nil { + n += 1 + sovGenerated(uint64(*m.LimitBytes)) + } + n += 2 + if m.Version != nil { + n += 1 + sovGenerated(uint64(*m.Version)) + } + n += 2 + return n +} + +func (m *BuildOutput) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.To != nil { + l = m.To.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PushSecret != nil { + l = m.PushSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ImageLabels) > 0 { + for _, e := range m.ImageLabels { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildPostCommitSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Script) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BuildRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Revision != nil { + l = m.Revision.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TriggeredByImage != nil { + l = m.TriggeredByImage.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.From != nil { + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Binary != nil { + l = m.Binary.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LastVersion != nil { + n += 1 + sovGenerated(uint64(*m.LastVersion)) + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.TriggeredBy) > 0 { + for _, e := range m.TriggeredBy { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DockerStrategyOptions != nil { + l = m.DockerStrategyOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SourceStrategyOptions != nil { + l = m.SourceStrategyOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *BuildSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Binary != nil { + l = m.Binary.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Dockerfile != nil { + l = len(*m.Dockerfile) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Git != nil { + l = m.Git.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Images) > 0 { + for _, e := range m.Images { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ContextDir) + n += 1 + l + sovGenerated(uint64(l)) + if m.SourceSecret != nil { + l = m.SourceSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ConfigMaps) > 0 { + for _, e := range m.ConfigMaps { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildSpec) Size() (n int) { + if m == nil { + return 0 + } + 
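+	// Each length-delimited field costs one tag byte, a varint length
+	// prefix, and the payload: n += 1 + l + sovGenerated(uint64(l)).
+	// The bare "n += 2" entries account for a one-byte tag plus a
+	// one-byte bool or small varint.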
var l int + _ = l + l = m.CommonSpec.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.TriggeredBy) > 0 { + for _, e := range m.TriggeredBy { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + if m.StartTimestamp != nil { + l = m.StartTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CompletionTimestamp != nil { + l = m.CompletionTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.Duration)) + l = len(m.OutputDockerImageReference) + n += 1 + l + sovGenerated(uint64(l)) + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Output.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Stages) > 0 { + for _, e := range m.Stages { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.LogSnippet) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildStatusOutput) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.To != nil { + l = m.To.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *BuildStatusOutputTo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ImageDigest) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BuildStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.DockerStrategy != nil { + l = m.DockerStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SourceStrategy != nil { + l = m.SourceStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CustomStrategy != nil { + l = m.CustomStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.JenkinsPipelineStrategy != nil { + l = m.JenkinsPipelineStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *BuildTriggerCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + if m.GenericWebHook != nil { + l = m.GenericWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GitHubWebHook != nil { + l = m.GitHubWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ImageChangeBuild != nil { + l = m.ImageChangeBuild.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GitLabWebHook != nil { + l = m.GitLabWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.BitbucketWebHook != nil { + l = m.BitbucketWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *BuildTriggerPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.GitHubWebHook != nil { + l = m.GitHubWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GenericWebHook != nil { + l = m.GenericWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ImageChange != nil { + l = m.ImageChange.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GitLabWebHook != nil { + l = m.GitLabWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if 
m.BitbucketWebHook != nil { + l = m.BitbucketWebHook.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *BuildVolume) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Mounts) > 0 { + for _, e := range m.Mounts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BuildVolumeMount) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DestinationPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BuildVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CSI != nil { + l = m.CSI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CommonSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ServiceAccount) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Revision != nil { + l = m.Revision.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Strategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Output.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.PostCommit.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.CompletionDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.CompletionDeadlineSeconds)) + } + if m.NodeSelector != nil { + l = m.NodeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MountTrustedCA != nil { + n += 2 + } + return n +} + +func (m *CommonWebHookCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Revision != nil { + l = m.Revision.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Secret) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ConfigMapBuildSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ConfigMap.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DestinationDir) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CustomBuildStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.PullSecret != nil { + l = m.PullSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + n += 2 + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.BuildAPIVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DockerBuildStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.From != nil { + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PullSecret != nil { + l = m.PullSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + l = len(m.DockerfilePath) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.BuildArgs) > 0 { + for _, e := range m.BuildArgs { + l = 
e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ImageOptimizationPolicy != nil { + l = len(*m.ImageOptimizationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DockerStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.BuildArgs) > 0 { + for _, e := range m.BuildArgs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.NoCache != nil { + n += 2 + } + return n +} + +func (m *GenericWebHookCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Revision != nil { + l = m.Revision.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Secret) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GenericWebHookEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Git != nil { + l = m.Git.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DockerStrategyOptions != nil { + l = m.DockerStrategyOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *GitBuildSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.URI) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Ref) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ProxyConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GitHubWebHookCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Revision != nil { + l = m.Revision.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Secret) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GitInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.GitBuildSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.GitSourceRevision.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Refs) > 0 { + for _, e := range m.Refs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *GitLabWebHookCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.CommonWebHookCause.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GitRefInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.GitBuildSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.GitSourceRevision.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GitSourceRevision) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Commit) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Author.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Committer.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageChangeCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ImageID) + n += 1 + l + sovGenerated(uint64(l)) + if m.FromRef != nil { + l = m.FromRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ImageChangeTrigger) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.LastTriggeredImageID) + n += 1 + l + sovGenerated(uint64(l)) + if m.From != nil { + l = m.From.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *ImageChangeTriggerStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.LastTriggeredImageID) + n += 1 + l + sovGenerated(uint64(l)) + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTriggerTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageLabel) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Paths) > 0 { + for _, e := range m.Paths { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.PullSecret != nil { + l = m.PullSecret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.As) > 0 { + for _, s := range m.As { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageSourcePath) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SourcePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DestinationDir) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamTagReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JenkinsPipelineBuildStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.JenkinsfilePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Jenkinsfile) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m OptionalNodeSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m) > 0 { + for k, v := range m { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ProxyConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HTTPProxy != nil { + l = len(*m.HTTPProxy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.HTTPSProxy != nil { + l = len(*m.HTTPSProxy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NoProxy != nil { + l = len(*m.NoProxy) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SecretBuildSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DestinationDir) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SecretLocalReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SecretSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.SecretSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MountPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SourceBuildStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.PullSecret != nil { + l = m.PullSecret.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Scripts) + n += 1 + l + sovGenerated(uint64(l)) + if m.Incremental != nil { + n += 2 + } + n += 2 + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SourceControlUser) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Email) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SourceRevision) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Git != nil { + l = m.Git.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SourceStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Incremental != nil { + n += 2 + } + return n +} + +func (m *StageInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.StartTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.DurationMilliseconds)) + if len(m.Steps) > 0 { + for _, e := range m.Steps { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StepInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.StartTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.DurationMilliseconds)) + return n +} + +func (m *WebHookTrigger) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Secret) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretReference != nil { + l = m.SecretReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *BinaryBuildRequestOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BinaryBuildRequestOptions{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `AsFile:` + fmt.Sprintf("%v", this.AsFile) + `,`, + `Commit:` + fmt.Sprintf("%v", this.Commit) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `AuthorName:` + fmt.Sprintf("%v", this.AuthorName) + `,`, + `AuthorEmail:` + fmt.Sprintf("%v", this.AuthorEmail) + `,`, + `CommitterName:` + fmt.Sprintf("%v", this.CommitterName) + `,`, + `CommitterEmail:` + fmt.Sprintf("%v", this.CommitterEmail) + `,`, + `}`, + }, "") + return s +} +func (this *BinaryBuildSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BinaryBuildSource{`, + `AsFile:` + fmt.Sprintf("%v", this.AsFile) + `,`, + `}`, + }, "") + return s +} +func (this *BitbucketWebHookCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BitbucketWebHookCause{`, + `CommonWebHookCause:` + strings.Replace(strings.Replace(this.CommonWebHookCause.String(), "CommonWebHookCause", "CommonWebHookCause", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Build) String() string { + if this == nil { + return "nil" + } + s 
:= strings.Join([]string{`&Build{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "BuildSpec", "BuildSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "BuildStatus", "BuildStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildConfig{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "BuildConfigSpec", "BuildConfigSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "BuildConfigStatus", "BuildConfigStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildConfigList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]BuildConfig{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "BuildConfig", "BuildConfig", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&BuildConfigList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *BuildConfigSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForTriggers := "[]BuildTriggerPolicy{" + for _, f := range this.Triggers { + repeatedStringForTriggers += strings.Replace(strings.Replace(f.String(), "BuildTriggerPolicy", "BuildTriggerPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForTriggers += "}" + s := strings.Join([]string{`&BuildConfigSpec{`, + `Triggers:` + repeatedStringForTriggers + `,`, + `RunPolicy:` + fmt.Sprintf("%v", this.RunPolicy) + `,`, + `CommonSpec:` + strings.Replace(strings.Replace(this.CommonSpec.String(), "CommonSpec", "CommonSpec", 1), `&`, ``, 1) + `,`, + `SuccessfulBuildsHistoryLimit:` + valueToStringGenerated(this.SuccessfulBuildsHistoryLimit) + `,`, + `FailedBuildsHistoryLimit:` + valueToStringGenerated(this.FailedBuildsHistoryLimit) + `,`, + `}`, + }, "") + return s +} +func (this *BuildConfigStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForImageChangeTriggers := "[]ImageChangeTriggerStatus{" + for _, f := range this.ImageChangeTriggers { + repeatedStringForImageChangeTriggers += strings.Replace(strings.Replace(f.String(), "ImageChangeTriggerStatus", "ImageChangeTriggerStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForImageChangeTriggers += "}" + s := 
strings.Join([]string{`&BuildConfigStatus{`, + `LastVersion:` + fmt.Sprintf("%v", this.LastVersion) + `,`, + `ImageChangeTriggers:` + repeatedStringForImageChangeTriggers + `,`, + `}`, + }, "") + return s +} +func (this *BuildList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Build{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Build", "Build", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&BuildList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *BuildLog) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildLog{`, + `}`, + }, "") + return s +} +func (this *BuildLogOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildLogOptions{`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, + `Previous:` + fmt.Sprintf("%v", this.Previous) + `,`, + `SinceSeconds:` + valueToStringGenerated(this.SinceSeconds) + `,`, + `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "v1.Time", 1) + `,`, + `Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`, + `TailLines:` + valueToStringGenerated(this.TailLines) + `,`, + `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`, + `NoWait:` + fmt.Sprintf("%v", this.NoWait) + `,`, + `Version:` + valueToStringGenerated(this.Version) + `,`, + `InsecureSkipTLSVerifyBackend:` + fmt.Sprintf("%v", this.InsecureSkipTLSVerifyBackend) + `,`, + `}`, + }, "") + return s +} +func (this *BuildOutput) String() string { + if this == nil { + return "nil" + } + repeatedStringForImageLabels := "[]ImageLabel{" + for _, f := range this.ImageLabels { + repeatedStringForImageLabels += strings.Replace(strings.Replace(f.String(), "ImageLabel", "ImageLabel", 1), `&`, ``, 1) + "," + } + repeatedStringForImageLabels += "}" + s := strings.Join([]string{`&BuildOutput{`, + `To:` + strings.Replace(fmt.Sprintf("%v", this.To), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `PushSecret:` + strings.Replace(fmt.Sprintf("%v", this.PushSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `ImageLabels:` + repeatedStringForImageLabels + `,`, + `}`, + }, "") + return s +} +func (this *BuildPostCommitSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildPostCommitSpec{`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + `,`, + `Script:` + fmt.Sprintf("%v", this.Script) + `,`, + `}`, + }, "") + return s +} +func (this *BuildRequest) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + repeatedStringForTriggeredBy := "[]BuildTriggerCause{" + for _, f := range this.TriggeredBy { + repeatedStringForTriggeredBy += strings.Replace(strings.Replace(f.String(), "BuildTriggerCause", "BuildTriggerCause", 1), `&`, ``, 1) + "," + } + repeatedStringForTriggeredBy += "}" + s := strings.Join([]string{`&BuildRequest{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + 
`Revision:` + strings.Replace(this.Revision.String(), "SourceRevision", "SourceRevision", 1) + `,`, + `TriggeredByImage:` + strings.Replace(fmt.Sprintf("%v", this.TriggeredByImage), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `From:` + strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `Binary:` + strings.Replace(this.Binary.String(), "BinaryBuildSource", "BinaryBuildSource", 1) + `,`, + `LastVersion:` + valueToStringGenerated(this.LastVersion) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `TriggeredBy:` + repeatedStringForTriggeredBy + `,`, + `DockerStrategyOptions:` + strings.Replace(this.DockerStrategyOptions.String(), "DockerStrategyOptions", "DockerStrategyOptions", 1) + `,`, + `SourceStrategyOptions:` + strings.Replace(this.SourceStrategyOptions.String(), "SourceStrategyOptions", "SourceStrategyOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForImages := "[]ImageSource{" + for _, f := range this.Images { + repeatedStringForImages += strings.Replace(strings.Replace(f.String(), "ImageSource", "ImageSource", 1), `&`, ``, 1) + "," + } + repeatedStringForImages += "}" + repeatedStringForSecrets := "[]SecretBuildSource{" + for _, f := range this.Secrets { + repeatedStringForSecrets += strings.Replace(strings.Replace(f.String(), "SecretBuildSource", "SecretBuildSource", 1), `&`, ``, 1) + "," + } + repeatedStringForSecrets += "}" + repeatedStringForConfigMaps := "[]ConfigMapBuildSource{" + for _, f := range this.ConfigMaps { + repeatedStringForConfigMaps += strings.Replace(strings.Replace(f.String(), "ConfigMapBuildSource", "ConfigMapBuildSource", 1), `&`, ``, 1) + "," + } + repeatedStringForConfigMaps += "}" + s := strings.Join([]string{`&BuildSource{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Binary:` + strings.Replace(this.Binary.String(), "BinaryBuildSource", "BinaryBuildSource", 1) + `,`, + `Dockerfile:` + valueToStringGenerated(this.Dockerfile) + `,`, + `Git:` + strings.Replace(this.Git.String(), "GitBuildSource", "GitBuildSource", 1) + `,`, + `Images:` + repeatedStringForImages + `,`, + `ContextDir:` + fmt.Sprintf("%v", this.ContextDir) + `,`, + `SourceSecret:` + strings.Replace(fmt.Sprintf("%v", this.SourceSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `Secrets:` + repeatedStringForSecrets + `,`, + `ConfigMaps:` + repeatedStringForConfigMaps + `,`, + `}`, + }, "") + return s +} +func (this *BuildSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForTriggeredBy := "[]BuildTriggerCause{" + for _, f := range this.TriggeredBy { + repeatedStringForTriggeredBy += strings.Replace(strings.Replace(f.String(), "BuildTriggerCause", "BuildTriggerCause", 1), `&`, ``, 1) + "," + } + repeatedStringForTriggeredBy += "}" + s := strings.Join([]string{`&BuildSpec{`, + `CommonSpec:` + strings.Replace(strings.Replace(this.CommonSpec.String(), "CommonSpec", "CommonSpec", 1), `&`, ``, 1) + `,`, + `TriggeredBy:` + repeatedStringForTriggeredBy + `,`, + `}`, + }, "") + return s +} +func (this *BuildStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForStages := "[]StageInfo{" + for _, f := range this.Stages { + repeatedStringForStages += strings.Replace(strings.Replace(f.String(), "StageInfo", "StageInfo", 1), `&`, ``, 1) + "," + } + repeatedStringForStages += "}" + repeatedStringForConditions := "[]BuildCondition{" + for _, f := range this.Conditions { + 
repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "BuildCondition", "BuildCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&BuildStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Cancelled:` + fmt.Sprintf("%v", this.Cancelled) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `StartTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.StartTimestamp), "Time", "v1.Time", 1) + `,`, + `CompletionTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.CompletionTimestamp), "Time", "v1.Time", 1) + `,`, + `Duration:` + fmt.Sprintf("%v", this.Duration) + `,`, + `OutputDockerImageReference:` + fmt.Sprintf("%v", this.OutputDockerImageReference) + `,`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `Output:` + strings.Replace(strings.Replace(this.Output.String(), "BuildStatusOutput", "BuildStatusOutput", 1), `&`, ``, 1) + `,`, + `Stages:` + repeatedStringForStages + `,`, + `LogSnippet:` + fmt.Sprintf("%v", this.LogSnippet) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *BuildStatusOutput) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildStatusOutput{`, + `To:` + strings.Replace(this.To.String(), "BuildStatusOutputTo", "BuildStatusOutputTo", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildStatusOutputTo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildStatusOutputTo{`, + `ImageDigest:` + fmt.Sprintf("%v", this.ImageDigest) + `,`, + `}`, + }, "") + return s +} +func (this *BuildStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `DockerStrategy:` + strings.Replace(this.DockerStrategy.String(), "DockerBuildStrategy", "DockerBuildStrategy", 1) + `,`, + `SourceStrategy:` + strings.Replace(this.SourceStrategy.String(), "SourceBuildStrategy", "SourceBuildStrategy", 1) + `,`, + `CustomStrategy:` + strings.Replace(this.CustomStrategy.String(), "CustomBuildStrategy", "CustomBuildStrategy", 1) + `,`, + `JenkinsPipelineStrategy:` + strings.Replace(this.JenkinsPipelineStrategy.String(), "JenkinsPipelineBuildStrategy", "JenkinsPipelineBuildStrategy", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildTriggerCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildTriggerCause{`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `GenericWebHook:` + strings.Replace(this.GenericWebHook.String(), "GenericWebHookCause", "GenericWebHookCause", 1) + `,`, + `GitHubWebHook:` + strings.Replace(this.GitHubWebHook.String(), "GitHubWebHookCause", "GitHubWebHookCause", 1) + `,`, + `ImageChangeBuild:` + strings.Replace(this.ImageChangeBuild.String(), "ImageChangeCause", "ImageChangeCause", 1) + `,`, + `GitLabWebHook:` + strings.Replace(this.GitLabWebHook.String(), "GitLabWebHookCause", "GitLabWebHookCause", 1) + `,`, + `BitbucketWebHook:` + strings.Replace(this.BitbucketWebHook.String(), "BitbucketWebHookCause", "BitbucketWebHookCause", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildTriggerPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildTriggerPolicy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `GitHubWebHook:` + 
strings.Replace(this.GitHubWebHook.String(), "WebHookTrigger", "WebHookTrigger", 1) + `,`, + `GenericWebHook:` + strings.Replace(this.GenericWebHook.String(), "WebHookTrigger", "WebHookTrigger", 1) + `,`, + `ImageChange:` + strings.Replace(this.ImageChange.String(), "ImageChangeTrigger", "ImageChangeTrigger", 1) + `,`, + `GitLabWebHook:` + strings.Replace(this.GitLabWebHook.String(), "WebHookTrigger", "WebHookTrigger", 1) + `,`, + `BitbucketWebHook:` + strings.Replace(this.BitbucketWebHook.String(), "WebHookTrigger", "WebHookTrigger", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BuildVolume) String() string { + if this == nil { + return "nil" + } + repeatedStringForMounts := "[]BuildVolumeMount{" + for _, f := range this.Mounts { + repeatedStringForMounts += strings.Replace(strings.Replace(f.String(), "BuildVolumeMount", "BuildVolumeMount", 1), `&`, ``, 1) + "," + } + repeatedStringForMounts += "}" + s := strings.Join([]string{`&BuildVolume{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "BuildVolumeSource", "BuildVolumeSource", 1), `&`, ``, 1) + `,`, + `Mounts:` + repeatedStringForMounts + `,`, + `}`, + }, "") + return s +} +func (this *BuildVolumeMount) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildVolumeMount{`, + `DestinationPath:` + fmt.Sprintf("%v", this.DestinationPath) + `,`, + `}`, + }, "") + return s +} +func (this *BuildVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BuildVolumeSource{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "SecretVolumeSource", "v11.SecretVolumeSource", 1) + `,`, + `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapVolumeSource", "v11.ConfigMapVolumeSource", 1) + `,`, + `CSI:` + strings.Replace(fmt.Sprintf("%v", this.CSI), "CSIVolumeSource", "v11.CSIVolumeSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CommonSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CommonSpec{`, + `ServiceAccount:` + fmt.Sprintf("%v", this.ServiceAccount) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "BuildSource", "BuildSource", 1), `&`, ``, 1) + `,`, + `Revision:` + strings.Replace(this.Revision.String(), "SourceRevision", "SourceRevision", 1) + `,`, + `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "BuildStrategy", "BuildStrategy", 1), `&`, ``, 1) + `,`, + `Output:` + strings.Replace(strings.Replace(this.Output.String(), "BuildOutput", "BuildOutput", 1), `&`, ``, 1) + `,`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "ResourceRequirements", "v11.ResourceRequirements", 1), `&`, ``, 1) + `,`, + `PostCommit:` + strings.Replace(strings.Replace(this.PostCommit.String(), "BuildPostCommitSpec", "BuildPostCommitSpec", 1), `&`, ``, 1) + `,`, + `CompletionDeadlineSeconds:` + valueToStringGenerated(this.CompletionDeadlineSeconds) + `,`, + `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "OptionalNodeSelector", "OptionalNodeSelector", 1) + `,`, + `MountTrustedCA:` + valueToStringGenerated(this.MountTrustedCA) + `,`, + `}`, + }, "") + return s +} +func (this *CommonWebHookCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CommonWebHookCause{`, + `Revision:` + strings.Replace(this.Revision.String(), "SourceRevision", 
"SourceRevision", 1) + `,`, + `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapBuildSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapBuildSource{`, + `ConfigMap:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "LocalObjectReference", "v11.LocalObjectReference", 1), `&`, ``, 1) + `,`, + `DestinationDir:` + fmt.Sprintf("%v", this.DestinationDir) + `,`, + `}`, + }, "") + return s +} +func (this *CustomBuildStrategy) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + repeatedStringForSecrets := "[]SecretSpec{" + for _, f := range this.Secrets { + repeatedStringForSecrets += strings.Replace(strings.Replace(f.String(), "SecretSpec", "SecretSpec", 1), `&`, ``, 1) + "," + } + repeatedStringForSecrets += "}" + s := strings.Join([]string{`&CustomBuildStrategy{`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `PullSecret:` + strings.Replace(fmt.Sprintf("%v", this.PullSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `ExposeDockerSocket:` + fmt.Sprintf("%v", this.ExposeDockerSocket) + `,`, + `ForcePull:` + fmt.Sprintf("%v", this.ForcePull) + `,`, + `Secrets:` + repeatedStringForSecrets + `,`, + `BuildAPIVersion:` + fmt.Sprintf("%v", this.BuildAPIVersion) + `,`, + `}`, + }, "") + return s +} +func (this *DockerBuildStrategy) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + repeatedStringForBuildArgs := "[]EnvVar{" + for _, f := range this.BuildArgs { + repeatedStringForBuildArgs += fmt.Sprintf("%v", f) + "," + } + repeatedStringForBuildArgs += "}" + repeatedStringForVolumes := "[]BuildVolume{" + for _, f := range this.Volumes { + repeatedStringForVolumes += strings.Replace(strings.Replace(f.String(), "BuildVolume", "BuildVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumes += "}" + s := strings.Join([]string{`&DockerBuildStrategy{`, + `From:` + strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `PullSecret:` + strings.Replace(fmt.Sprintf("%v", this.PullSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `NoCache:` + fmt.Sprintf("%v", this.NoCache) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `ForcePull:` + fmt.Sprintf("%v", this.ForcePull) + `,`, + `DockerfilePath:` + fmt.Sprintf("%v", this.DockerfilePath) + `,`, + `BuildArgs:` + repeatedStringForBuildArgs + `,`, + `ImageOptimizationPolicy:` + valueToStringGenerated(this.ImageOptimizationPolicy) + `,`, + `Volumes:` + repeatedStringForVolumes + `,`, + `}`, + }, "") + return s +} +func (this *DockerStrategyOptions) String() string { + if this == nil { + return "nil" + } + repeatedStringForBuildArgs := "[]EnvVar{" + for _, f := range this.BuildArgs { + repeatedStringForBuildArgs += fmt.Sprintf("%v", f) + "," + } + repeatedStringForBuildArgs += "}" + s := strings.Join([]string{`&DockerStrategyOptions{`, + `BuildArgs:` + repeatedStringForBuildArgs + `,`, + `NoCache:` + valueToStringGenerated(this.NoCache) + `,`, + `}`, + }, "") + return s +} +func (this *GenericWebHookCause) 
String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GenericWebHookCause{`, + `Revision:` + strings.Replace(this.Revision.String(), "SourceRevision", "SourceRevision", 1) + `,`, + `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`, + `}`, + }, "") + return s +} +func (this *GenericWebHookEvent) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + s := strings.Join([]string{`&GenericWebHookEvent{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Git:` + strings.Replace(this.Git.String(), "GitInfo", "GitInfo", 1) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `DockerStrategyOptions:` + strings.Replace(this.DockerStrategyOptions.String(), "DockerStrategyOptions", "DockerStrategyOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GitBuildSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitBuildSource{`, + `URI:` + fmt.Sprintf("%v", this.URI) + `,`, + `Ref:` + fmt.Sprintf("%v", this.Ref) + `,`, + `ProxyConfig:` + strings.Replace(strings.Replace(this.ProxyConfig.String(), "ProxyConfig", "ProxyConfig", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *GitHubWebHookCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitHubWebHookCause{`, + `Revision:` + strings.Replace(this.Revision.String(), "SourceRevision", "SourceRevision", 1) + `,`, + `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`, + `}`, + }, "") + return s +} +func (this *GitInfo) String() string { + if this == nil { + return "nil" + } + repeatedStringForRefs := "[]GitRefInfo{" + for _, f := range this.Refs { + repeatedStringForRefs += strings.Replace(strings.Replace(f.String(), "GitRefInfo", "GitRefInfo", 1), `&`, ``, 1) + "," + } + repeatedStringForRefs += "}" + s := strings.Join([]string{`&GitInfo{`, + `GitBuildSource:` + strings.Replace(strings.Replace(this.GitBuildSource.String(), "GitBuildSource", "GitBuildSource", 1), `&`, ``, 1) + `,`, + `GitSourceRevision:` + strings.Replace(strings.Replace(this.GitSourceRevision.String(), "GitSourceRevision", "GitSourceRevision", 1), `&`, ``, 1) + `,`, + `Refs:` + repeatedStringForRefs + `,`, + `}`, + }, "") + return s +} +func (this *GitLabWebHookCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitLabWebHookCause{`, + `CommonWebHookCause:` + strings.Replace(strings.Replace(this.CommonWebHookCause.String(), "CommonWebHookCause", "CommonWebHookCause", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *GitRefInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitRefInfo{`, + `GitBuildSource:` + strings.Replace(strings.Replace(this.GitBuildSource.String(), "GitBuildSource", "GitBuildSource", 1), `&`, ``, 1) + `,`, + `GitSourceRevision:` + strings.Replace(strings.Replace(this.GitSourceRevision.String(), "GitSourceRevision", "GitSourceRevision", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *GitSourceRevision) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitSourceRevision{`, + `Commit:` + fmt.Sprintf("%v", this.Commit) + `,`, + `Author:` + strings.Replace(strings.Replace(this.Author.String(), "SourceControlUser", "SourceControlUser", 1), `&`, ``, 1) + `,`, + `Committer:` + strings.Replace(strings.Replace(this.Committer.String(), 
"SourceControlUser", "SourceControlUser", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ImageChangeCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageChangeCause{`, + `ImageID:` + fmt.Sprintf("%v", this.ImageID) + `,`, + `FromRef:` + strings.Replace(fmt.Sprintf("%v", this.FromRef), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageChangeTrigger) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageChangeTrigger{`, + `LastTriggeredImageID:` + fmt.Sprintf("%v", this.LastTriggeredImageID) + `,`, + `From:` + strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, + `}`, + }, "") + return s +} +func (this *ImageChangeTriggerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageChangeTriggerStatus{`, + `LastTriggeredImageID:` + fmt.Sprintf("%v", this.LastTriggeredImageID) + `,`, + `From:` + strings.Replace(strings.Replace(this.From.String(), "ImageStreamTagReference", "ImageStreamTagReference", 1), `&`, ``, 1) + `,`, + `LastTriggerTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTriggerTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageLabel) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageLabel{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *ImageSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForPaths := "[]ImageSourcePath{" + for _, f := range this.Paths { + repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "ImageSourcePath", "ImageSourcePath", 1), `&`, ``, 1) + "," + } + repeatedStringForPaths += "}" + s := strings.Join([]string{`&ImageSource{`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `Paths:` + repeatedStringForPaths + `,`, + `PullSecret:` + strings.Replace(fmt.Sprintf("%v", this.PullSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `As:` + fmt.Sprintf("%v", this.As) + `,`, + `}`, + }, "") + return s +} +func (this *ImageSourcePath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageSourcePath{`, + `SourcePath:` + fmt.Sprintf("%v", this.SourcePath) + `,`, + `DestinationDir:` + fmt.Sprintf("%v", this.DestinationDir) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamTagReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageStreamTagReference{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *JenkinsPipelineBuildStrategy) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + s := strings.Join([]string{`&JenkinsPipelineBuildStrategy{`, + `JenkinsfilePath:` + fmt.Sprintf("%v", this.JenkinsfilePath) + `,`, + `Jenkinsfile:` + fmt.Sprintf("%v", this.Jenkinsfile) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `}`, + }, "") + 
return s +} +func (this *ProxyConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProxyConfig{`, + `HTTPProxy:` + valueToStringGenerated(this.HTTPProxy) + `,`, + `HTTPSProxy:` + valueToStringGenerated(this.HTTPSProxy) + `,`, + `NoProxy:` + valueToStringGenerated(this.NoProxy) + `,`, + `}`, + }, "") + return s +} +func (this *SecretBuildSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretBuildSource{`, + `Secret:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Secret), "LocalObjectReference", "v11.LocalObjectReference", 1), `&`, ``, 1) + `,`, + `DestinationDir:` + fmt.Sprintf("%v", this.DestinationDir) + `,`, + `}`, + }, "") + return s +} +func (this *SecretLocalReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretLocalReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *SecretSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretSpec{`, + `SecretSource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.SecretSource), "LocalObjectReference", "v11.LocalObjectReference", 1), `&`, ``, 1) + `,`, + `MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`, + `}`, + }, "") + return s +} +func (this *SourceBuildStrategy) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + repeatedStringForVolumes := "[]BuildVolume{" + for _, f := range this.Volumes { + repeatedStringForVolumes += strings.Replace(strings.Replace(f.String(), "BuildVolume", "BuildVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumes += "}" + s := strings.Join([]string{`&SourceBuildStrategy{`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `PullSecret:` + strings.Replace(fmt.Sprintf("%v", this.PullSecret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `Scripts:` + fmt.Sprintf("%v", this.Scripts) + `,`, + `Incremental:` + valueToStringGenerated(this.Incremental) + `,`, + `ForcePull:` + fmt.Sprintf("%v", this.ForcePull) + `,`, + `Volumes:` + repeatedStringForVolumes + `,`, + `}`, + }, "") + return s +} +func (this *SourceControlUser) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SourceControlUser{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Email:` + fmt.Sprintf("%v", this.Email) + `,`, + `}`, + }, "") + return s +} +func (this *SourceRevision) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SourceRevision{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Git:` + strings.Replace(this.Git.String(), "GitSourceRevision", "GitSourceRevision", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SourceStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SourceStrategyOptions{`, + `Incremental:` + valueToStringGenerated(this.Incremental) + `,`, + `}`, + }, "") + return s +} +func (this *StageInfo) String() string { + if this == nil { + return "nil" + } + repeatedStringForSteps := "[]StepInfo{" + for _, f := range this.Steps { + repeatedStringForSteps += strings.Replace(strings.Replace(f.String(), "StepInfo", "StepInfo", 1), `&`, ``, 1) + 
"," + } + repeatedStringForSteps += "}" + s := strings.Join([]string{`&StageInfo{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `StartTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `DurationMilliseconds:` + fmt.Sprintf("%v", this.DurationMilliseconds) + `,`, + `Steps:` + repeatedStringForSteps + `,`, + `}`, + }, "") + return s +} +func (this *StepInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StepInfo{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `StartTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `DurationMilliseconds:` + fmt.Sprintf("%v", this.DurationMilliseconds) + `,`, + `}`, + }, "") + return s +} +func (this *WebHookTrigger) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebHookTrigger{`, + `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`, + `AllowEnv:` + fmt.Sprintf("%v", this.AllowEnv) + `,`, + `SecretReference:` + strings.Replace(this.SecretReference.String(), "SecretLocalReference", "SecretLocalReference", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *BinaryBuildRequestOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BinaryBuildRequestOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BinaryBuildRequestOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AsFile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AsFile = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Commit = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthorName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AuthorName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthorEmail", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AuthorEmail = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommitterName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CommitterName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommitterEmail", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CommitterEmail = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BinaryBuildSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BinaryBuildSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BinaryBuildSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AsFile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AsFile = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BitbucketWebHookCause) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BitbucketWebHookCause: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BitbucketWebHookCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommonWebHookCause", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CommonWebHookCause.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Build) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Build: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Build: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx 
= postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = BuildConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen 
:= int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildConfigList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildConfigList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildConfigList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, BuildConfig{}) + if err := 
m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildConfigSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildConfigSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildConfigSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Triggers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Triggers = append(m.Triggers, BuildTriggerPolicy{}) + if err := m.Triggers[len(m.Triggers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RunPolicy = BuildRunPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommonSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CommonSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SuccessfulBuildsHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SuccessfulBuildsHistoryLimit = &v + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailedBuildsHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.FailedBuildsHistoryLimit = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildConfigStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildConfigStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildConfigStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastVersion", wireType) + } + m.LastVersion = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastVersion |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageChangeTriggers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageChangeTriggers = append(m.ImageChangeTriggers, ImageChangeTriggerStatus{}) + if err := m.ImageChangeTriggers[len(m.ImageChangeTriggers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Build{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildLog) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildLog: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildLog: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildLogOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildLogOptions: 
wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildLogOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Follow", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Follow = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Previous", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Previous = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SinceSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SinceSeconds = &v + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SinceTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SinceTime == nil { + m.SinceTime = &v1.Time{} + } + if err := m.SinceTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamps", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Timestamps = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TailLines", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TailLines = &v 
+ case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LimitBytes", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LimitBytes = &v + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoWait", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.NoWait = bool(v != 0) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Version = &v + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InsecureSkipTLSVerifyBackend", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.InsecureSkipTLSVerifyBackend = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildOutput) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildOutput: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildOutput: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.To == nil { + m.To = &v11.ObjectReference{} + } + if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PushSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PushSecret == nil { + m.PushSecret = &v11.LocalObjectReference{} + } + if err := m.PushSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageLabels = append(m.ImageLabels, ImageLabel{}) + if err := m.ImageLabels[len(m.ImageLabels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildPostCommitSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildPostCommitSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildPostCommitSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Script", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Script = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Revision == nil { + m.Revision = &SourceRevision{} + } + if err := m.Revision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TriggeredByImage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TriggeredByImage == nil { + m.TriggeredByImage = &v11.ObjectReference{} + } + if err := m.TriggeredByImage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.From == nil { + m.From = &v11.ObjectReference{} + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Binary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Binary == nil { + m.Binary = &BinaryBuildSource{} + } + if err := m.Binary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastVersion", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LastVersion = &v + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, v11.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TriggeredBy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TriggeredBy = append(m.TriggeredBy, BuildTriggerCause{}) + if err := m.TriggeredBy[len(m.TriggeredBy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerStrategyOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DockerStrategyOptions == nil { + m.DockerStrategyOptions = &DockerStrategyOptions{} + } + if err := m.DockerStrategyOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceStrategyOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceStrategyOptions == nil { + m.SourceStrategyOptions = &SourceStrategyOptions{} + } + if err := m.SourceStrategyOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = BuildSourceType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Binary", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Binary == nil { + m.Binary = &BinaryBuildSource{} + } + if err := m.Binary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dockerfile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Dockerfile = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Git", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Git == nil { + m.Git = &GitBuildSource{} + } + if err := m.Git.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Images = append(m.Images, ImageSource{}) + if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContextDir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := 
iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContextDir = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceSecret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceSecret == nil { + m.SourceSecret = &v11.LocalObjectReference{} + } + if err := m.SourceSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secrets = append(m.Secrets, SecretBuildSource{}) + if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMaps", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConfigMaps = append(m.ConfigMaps, ConfigMapBuildSource{}) + if err := m.ConfigMaps[len(m.ConfigMaps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field CommonSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CommonSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TriggeredBy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TriggeredBy = append(m.TriggeredBy, BuildTriggerCause{}) + if err := m.TriggeredBy[len(m.TriggeredBy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = BuildPhase(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Cancelled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Cancelled = bool(v != 0) + case 3: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = StatusReason(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartTimestamp == nil { + m.StartTimestamp = &v1.Time{} + } + if err := m.StartTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletionTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CompletionTimestamp == nil { + m.CompletionTimestamp = &v1.Time{} + } + if err := m.CompletionTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + m.Duration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Duration |= time.Duration(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OutputDockerImageReference", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OutputDockerImageReference = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &v11.ObjectReference{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Output.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stages", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stages = append(m.Stages, StageInfo{}) + if err := m.Stages[len(m.Stages)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogSnippet", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LogSnippet = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Conditions = append(m.Conditions, BuildCondition{})
+			if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildStatusOutput) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildStatusOutput: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildStatusOutput: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field To", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.To == nil {
+				m.To = &BuildStatusOutputTo{}
+			}
+			if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildStatusOutputTo) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildStatusOutputTo: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildStatusOutputTo: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ImageDigest", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ImageDigest = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildStrategy) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildStrategy: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = BuildStrategyType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DockerStrategy", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.DockerStrategy == nil {
+				m.DockerStrategy = &DockerBuildStrategy{}
+			}
+			if err := m.DockerStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SourceStrategy", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SourceStrategy == nil {
+				m.SourceStrategy = &SourceBuildStrategy{}
+			}
+			if err := m.SourceStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CustomStrategy", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.CustomStrategy == nil {
+				m.CustomStrategy = &CustomBuildStrategy{}
+			}
+			if err := m.CustomStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field JenkinsPipelineStrategy", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.JenkinsPipelineStrategy == nil {
+				m.JenkinsPipelineStrategy = &JenkinsPipelineBuildStrategy{}
+			}
+			if err := m.JenkinsPipelineStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildTriggerCause) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildTriggerCause: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildTriggerCause: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GenericWebHook", wireType)
fmt.Errorf("proto: wrong wireType = %d for field GenericWebHook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GenericWebHook == nil { + m.GenericWebHook = &GenericWebHookCause{} + } + if err := m.GenericWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitHubWebHook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GitHubWebHook == nil { + m.GitHubWebHook = &GitHubWebHookCause{} + } + if err := m.GitHubWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageChangeBuild", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ImageChangeBuild == nil { + m.ImageChangeBuild = &ImageChangeCause{} + } + if err := m.ImageChangeBuild.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitLabWebHook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GitLabWebHook == nil { + m.GitLabWebHook = &GitLabWebHookCause{} + } + if err := m.GitLabWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BitbucketWebHook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.BitbucketWebHook == nil {
+				m.BitbucketWebHook = &BitbucketWebHookCause{}
+			}
+			if err := m.BitbucketWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildTriggerPolicy) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildTriggerPolicy: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildTriggerPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = BuildTriggerType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GitHubWebHook", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.GitHubWebHook == nil {
+				m.GitHubWebHook = &WebHookTrigger{}
+			}
+			if err := m.GitHubWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GenericWebHook", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.GenericWebHook == nil {
+				m.GenericWebHook = &WebHookTrigger{}
+			}
+			if err := m.GenericWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ImageChange", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ImageChange == nil {
+				m.ImageChange = &ImageChangeTrigger{}
+			}
+			if err := m.ImageChange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GitLabWebHook", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.GitLabWebHook == nil {
+				m.GitLabWebHook = &WebHookTrigger{}
+			}
+			if err := m.GitLabWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BitbucketWebHook", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.BitbucketWebHook == nil {
+				m.BitbucketWebHook = &WebHookTrigger{}
+			}
+			if err := m.BitbucketWebHook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildVolume) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildVolume: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildVolume: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Mounts = append(m.Mounts, BuildVolumeMount{})
+			if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildVolumeMount) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildVolumeMount: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildVolumeMount: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DestinationPath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DestinationPath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BuildVolumeSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BuildVolumeSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BuildVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = BuildVolumeSourceType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Secret == nil {
+				m.Secret = &v11.SecretVolumeSource{}
+			}
+			if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.ConfigMap == nil {
+				m.ConfigMap = &v11.ConfigMapVolumeSource{}
+			}
+			if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CSI", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.CSI == nil {
+				m.CSI = &v11.CSIVolumeSource{}
+			}
+			if err := m.CSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CommonSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CommonSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CommonSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccount", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ServiceAccount = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Revision == nil {
+				m.Revision = &SourceRevision{}
+			}
+			if err := m.Revision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Output.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PostCommit", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.PostCommit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CompletionDeadlineSeconds", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.CompletionDeadlineSeconds = &v
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.NodeSelector == nil {
+				m.NodeSelector = OptionalNodeSelector{}
+			}
+			if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 10:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MountTrustedCA", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.MountTrustedCA = &b
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CommonWebHookCause) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CommonWebHookCause: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CommonWebHookCause: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Revision == nil {
+				m.Revision = &SourceRevision{}
+			}
+			if err := m.Revision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Secret = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ConfigMapBuildSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ConfigMapBuildSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ConfigMapBuildSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DestinationDir", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DestinationDir = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CustomBuildStrategy) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CustomBuildStrategy: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CustomBuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PullSecret", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PullSecret == nil {
+				m.PullSecret = &v11.LocalObjectReference{}
+			}
+			if err := m.PullSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Env = append(m.Env, v11.EnvVar{})
+			if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExposeDockerSocket", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ExposeDockerSocket = bool(v != 0)
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ForcePull", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ForcePull = bool(v != 0)
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Secrets = append(m.Secrets, SecretSpec{})
+			if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BuildAPIVersion", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.BuildAPIVersion = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DockerBuildStrategy) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DockerBuildStrategy: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DockerBuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.From == nil {
+				m.From = &v11.ObjectReference{}
+			}
+			if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PullSecret", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PullSecret == nil {
+				m.PullSecret = &v11.LocalObjectReference{}
+			}
+			if err := m.PullSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NoCache", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.NoCache = bool(v != 0)
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType)
for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, v11.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ForcePull", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ForcePull = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerfilePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerfilePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BuildArgs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BuildArgs = append(m.BuildArgs, v11.EnvVar{}) + if err := m.BuildArgs[len(m.BuildArgs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageOptimizationPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ImageOptimizationPolicy(dAtA[iNdEx:postIndex]) + m.ImageOptimizationPolicy = &s + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen 
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Volumes = append(m.Volumes, BuildVolume{})
+			if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DockerStrategyOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DockerStrategyOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DockerStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field BuildArgs", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.BuildArgs = append(m.BuildArgs, v11.EnvVar{})
+			if err := m.BuildArgs[len(m.BuildArgs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NoCache", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.NoCache = &b
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GenericWebHookCause) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GenericWebHookCause: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GenericWebHookCause: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Revision == nil {
+				m.Revision = &SourceRevision{}
+			}
+			if err := m.Revision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Secret = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GenericWebHookEvent) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GenericWebHookEvent: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GenericWebHookEvent: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = BuildSourceType(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Git", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Git == nil {
+				m.Git = &GitInfo{}
+			}
+			if err := m.Git.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Env = append(m.Env, v11.EnvVar{})
+			if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DockerStrategyOptions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.DockerStrategyOptions == nil {
+				m.DockerStrategyOptions = &DockerStrategyOptions{}
+			}
+			if err := m.DockerStrategyOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GitBuildSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GitBuildSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GitBuildSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field URI", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.URI = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Ref = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ProxyConfig", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ProxyConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GitHubWebHookCause) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GitHubWebHookCause: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GitHubWebHookCause: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Revision == nil {
+				m.Revision = &SourceRevision{}
+			}
+			if err := m.Revision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Secret = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GitInfo) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GitInfo: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GitInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GitBuildSource", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.GitBuildSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GitSourceRevision", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.GitSourceRevision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Refs", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Refs = append(m.Refs, GitRefInfo{})
+			if err := m.Refs[len(m.Refs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GitLabWebHookCause) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GitLabWebHookCause: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GitLabWebHookCause: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CommonWebHookCause", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.CommonWebHookCause.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GitRefInfo) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GitRefInfo: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GitRefInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GitBuildSource", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.GitBuildSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GitSourceRevision", wireType)
GitSourceRevision", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.GitSourceRevision.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitSourceRevision) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitSourceRevision: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitSourceRevision: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Commit = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Author", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Author.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Committer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { 
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Committer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageChangeCause) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageChangeCause: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageChangeCause: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ImageID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ImageID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FromRef", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.FromRef == nil {
+				m.FromRef = &v11.ObjectReference{}
+			}
+			if err := m.FromRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageChangeTrigger) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageChangeTrigger: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageChangeTrigger: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LastTriggeredImageID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.LastTriggeredImageID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.From == nil {
+				m.From = &v11.ObjectReference{}
+			}
+			if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Paused = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageChangeTriggerStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageChangeTriggerStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageChangeTriggerStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LastTriggeredImageID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.LastTriggeredImageID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LastTriggerTime", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.LastTriggerTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageLabel) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageLabel: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageLabel: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Value = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageSource) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageSource: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageSource: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Paths = append(m.Paths, ImageSourcePath{})
+			if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PullSecret", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PullSecret == nil {
+				m.PullSecret = &v11.LocalObjectReference{}
+			}
+			if err := m.PullSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field As", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.As = append(m.As, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageSourcePath) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageSourcePath: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageSourcePath: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SourcePath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SourcePath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DestinationDir", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DestinationDir = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageStreamTagReference) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageStreamTagReference: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageStreamTagReference: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Namespace = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *JenkinsPipelineBuildStrategy) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: JenkinsPipelineBuildStrategy: wiretype end group for non-group")
return fmt.Errorf("proto: JenkinsPipelineBuildStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JenkinsPipelineBuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JenkinsfilePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JenkinsfilePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Jenkinsfile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Jenkinsfile = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, v11.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OptionalNodeSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OptionalNodeSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OptionalNodeSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + 
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if *m == nil {
+				*m = make(map[string]string)
+			}
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowGenerated
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowGenerated
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipGenerated(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if (skippy < 0) || (iNdEx+skippy) < 0 {
+						return ErrInvalidLengthGenerated
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			(*m)[mapkey] = mapvalue
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ProxyConfig) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ProxyConfig: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ProxyConfig: illegal tag %d (wire type %d)", fieldNum, wire)
fmt.Errorf("proto: ProxyConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPProxy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.HTTPProxy = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPSProxy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.HTTPSProxy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NoProxy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.NoProxy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretBuildSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretBuildSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretBuildSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DestinationDir", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.DestinationDir = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SecretLocalReference) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SecretLocalReference: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SecretLocalReference: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SecretSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SecretSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SecretSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SecretSource", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.SecretSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MountPath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.MountPath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SourceBuildStrategy) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SourceBuildStrategy: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SourceBuildStrategy: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PullSecret", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PullSecret == nil {
+				m.PullSecret = &v11.LocalObjectReference{}
+			}
+			if err := m.PullSecret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Env = append(m.Env, v11.EnvVar{})
+			if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Scripts", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Scripts = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Incremental", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.Incremental = &b
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ForcePull", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ForcePull = bool(v != 0)
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Volumes = append(m.Volumes, BuildVolume{})
+			if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SourceControlUser) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceControlUser: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceControlUser: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Email", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Email = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SourceRevision) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceRevision: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceRevision: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } 
+ var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = BuildSourceType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Git", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Git == nil { + m.Git = &GitSourceRevision{} + } + if err := m.Git.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SourceStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Incremental", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Incremental = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StageInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + 
if wireType == 4 { + return fmt.Errorf("proto: StageInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StageInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = StageName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DurationMilliseconds", wireType) + } + m.DurationMilliseconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DurationMilliseconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Steps", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Steps = append(m.Steps, StepInfo{}) + if err := m.Steps[len(m.Steps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StepInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + 
return fmt.Errorf("proto: StepInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StepInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = StepName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DurationMilliseconds", wireType) + } + m.DurationMilliseconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DurationMilliseconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebHookTrigger) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WebHookTrigger: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebHookTrigger: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { 
+ return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secret = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowEnv", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowEnv = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretReference == nil { + m.SecretReference = &SecretLocalReference{} + } + if err := m.SecretReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +)
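Every Unmarshal method in this generated file follows the same three-step loop: read a varint-encoded tag, split it into a field number and a wire type, then either decode the payload or hand it to skipGenerated. A stand-alone sketch of that decoding logic (illustrative only, not part of the vendored package; all names here are mine):

package main

import (
	"errors"
	"fmt"
)

// readVarint decodes a base-128 varint the same way the generated code
// does: seven payload bits per byte, high bit set on all but the last byte.
func readVarint(b []byte, i int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("proto: integer overflow")
		}
		if i >= len(b) {
			return 0, 0, errors.New("proto: unexpected EOF")
		}
		c := b[i]
		i++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 { // high bit clear: last byte of the varint
			return v, i, nil
		}
	}
}

func main() {
	// A single length-delimited field: tag 0x12 = field 2, wire type 2,
	// followed by a varint length and the raw bytes.
	msg := []byte{0x12, 0x04, 'd', 'a', 't', 'a'}

	tag, i, _ := readVarint(msg, 0)
	fieldNum := int32(tag >> 3) // 2
	wireType := int(tag & 0x7)  // 2 (length-delimited)

	length, i, _ := readVarint(msg, i)
	fmt.Println(fieldNum, wireType, string(msg[i:i+int(length)])) // 2 2 data
}

skipGenerated above is the same idea applied to unknown fields: wire types 3 and 4 open and close groups, which is why it tracks a nesting depth and rejects a group end at depth zero.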
diff --git a/vendor/github.com/openshift/api/build/v1/generated.proto b/vendor/github.com/openshift/api/build/v1/generated.proto new file mode 100644 index 000000000..57b54f392 --- /dev/null +++ b/vendor/github.com/openshift/api/build/v1/generated.proto @@ -0,0 +1,1239 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package github.com.openshift.api.build.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/openshift/api/build/v1"; + +// BinaryBuildRequestOptions are the options required to fully specify a binary build request +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message BinaryBuildRequestOptions { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // asFile determines if the binary should be created as a file within the source rather than extracted as an archive + optional string asFile = 2; + + // revision.commit is the value identifying a specific commit + optional string revisionCommit = 3; + + // revision.message is the description of a specific commit + optional string revisionMessage = 4; + + // revision.authorName of the source control user + optional string revisionAuthorName = 5; + + // revision.authorEmail of the source control user + optional string revisionAuthorEmail = 6; + + // revision.committerName of the source control user + optional string revisionCommitterName = 7; + + // revision.committerEmail of the source control user + optional string revisionCommitterEmail = 8; +} + +// BinaryBuildSource describes a binary file to be used for the Docker and Source build strategies, +// where the file will be extracted and used as the build source. +message BinaryBuildSource { + // asFile indicates that the provided binary input should be considered a single file + // within the build input. For example, specifying "webapp.war" would place the provided + // binary as `/webapp.war` for the builder. If left empty, the Docker and Source build + // strategies assume this file is a zip, tar, or tar.gz file and extract it as the source. + // The custom strategy receives this binary as standard input. This filename may not + // contain slashes or be '..' or '.'. + optional string asFile = 1; +} + +// BitbucketWebHookCause has information about a Bitbucket webhook that triggered a +// build. +message BitbucketWebHookCause { + optional CommonWebHookCause commonSpec = 1; +} + +// Build encapsulates the inputs needed to produce a new deployable image, as well as +// the status of the execution and a reference to the Pod which executed the build. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message Build { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is all the inputs used to execute the build. + optional BuildSpec spec = 2; + + // status is the current status of the build. + // +optional + optional BuildStatus status = 3; +} + +// BuildCondition describes the state of a build at a certain point.
+message BuildCondition { + // Type of build condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time this condition was updated. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + + // The last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + optional string reason = 4; + + // A human readable message indicating details about the transition. + optional string message = 5; +} + +// Build configurations define a build process for new container images. There are three types of builds possible - a container image build using a Dockerfile, a Source-to-Image build that uses a specially prepared base image that accepts source code that it can make runnable, and a custom build that can run arbitrary container images as a base and accept the build parameters. Builds run on the cluster and on completion are pushed to the container image registry specified in the "output" section. A build can be triggered via a webhook, when the base image changes, or when a user manually requests a new build be created. +// +// Each build created by a build configuration is numbered and refers back to its parent configuration. Multiple builds can be triggered at once. Builds that do not have "output" set can be used to test code or run a verification build. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message BuildConfig { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec holds all the input necessary to produce a new build, and the conditions when + // to trigger them. + optional BuildConfigSpec spec = 2; + + // status holds any relevant information about a build config + // +optional + optional BuildConfigStatus status = 3; +} + +// BuildConfigList is a collection of BuildConfigs. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message BuildConfigList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of build configs + repeated BuildConfig items = 2; +} + +// BuildConfigSpec describes when and how builds are created +message BuildConfigSpec { + // triggers determine how new Builds can be launched from a BuildConfig. If + // no triggers are defined, a new build can only occur as a result of an + // explicit client build creation. + // +optional + repeated BuildTriggerPolicy triggers = 1; + + // RunPolicy describes how the new build created from this build + // configuration will be scheduled for execution. + // This is optional, if not specified we default to "Serial". + optional string runPolicy = 2; + + // CommonSpec is the desired build specification + optional CommonSpec commonSpec = 3; + + // successfulBuildsHistoryLimit is the number of old successful builds to retain.
+ // When a BuildConfig is created, the 5 most recent successful builds are retained unless this value is set. + // If removed after the BuildConfig has been created, all successful builds are retained. + optional int32 successfulBuildsHistoryLimit = 4; + + // failedBuildsHistoryLimit is the number of old failed builds to retain. + // When a BuildConfig is created, the 5 most recent failed builds are retained unless this value is set. + // If removed after the BuildConfig has been created, all failed builds are retained. + optional int32 failedBuildsHistoryLimit = 5; +} + +// BuildConfigStatus contains current state of the build config object. +message BuildConfigStatus { + // lastVersion is used to inform about the number of the last triggered build. + optional int64 lastVersion = 1; + + // ImageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec, + // including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry + // in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger. + repeated ImageChangeTriggerStatus imageChangeTriggers = 2; +} + +// BuildList is a collection of Builds. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message BuildList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of builds + repeated Build items = 2; +} + +// BuildLog is the (unused) resource associated with the build log redirector +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message BuildLog { +} + +// BuildLogOptions is the REST options for a build log +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message BuildLogOptions { + // container for which to stream logs. Defaults to only container if there is one container in the pod. + optional string container = 1; + + // follow if true indicates that the build log should be streamed until + // the build terminates. + optional bool follow = 2; + + // previous returns previous build logs. Defaults to false. + optional bool previous = 3; + + // sinceSeconds is a relative time in seconds before the current time from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + optional int64 sinceSeconds = 4; + + // sinceTime is an RFC3339 timestamp from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5; + + // timestamps, If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. Defaults to false.
+ optional bool timestamps = 6; + + // tailLines, If set, is the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + optional int64 tailLines = 7; + + // limitBytes, If set, is the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. + optional int64 limitBytes = 8; + + // noWait if true causes the call to return immediately even if the build + // is not available yet. Otherwise the server will wait until the build has started. + // TODO: Fix the tag to 'noWait' in v2 + optional bool nowait = 9; + + // version of the build for which to view logs. + optional int64 version = 10; + + // insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the + // serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver + // and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real + // kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the + // connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept + // the actual log data coming from the real kubelet). + // +optional + optional bool insecureSkipTLSVerifyBackend = 11; +} + +// BuildOutput is input to a build strategy and describes the container image that the strategy +// should produce. +message BuildOutput { + // to defines an optional location to push the output of this build to. + // Kind must be one of 'ImageStreamTag' or 'DockerImage'. + // This value will be used to look up a container image repository to push to. + // In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of + // the build unless Namespace is specified. + optional k8s.io.api.core.v1.ObjectReference to = 1; + + // PushSecret is the name of a Secret that would be used for setting + // up the authentication for executing the Docker push to authentication + // enabled Docker Registry (or Docker Hub). + optional k8s.io.api.core.v1.LocalObjectReference pushSecret = 2; + + // imageLabels define a list of labels that are applied to the resulting image. If there + // are multiple labels with the same name then the last one in the list is used. + repeated ImageLabel imageLabels = 3; +} + +// A BuildPostCommitSpec holds a build post commit hook specification. The hook +// executes a command in a temporary container running the build output image, +// immediately after the last layer of the image is committed and before the +// image is pushed to a registry. The command is executed with the current +// working directory ($PWD) set to the image's WORKDIR. +// +// The build will be marked as failed if the hook execution fails. It will fail +// if the script or command return a non-zero exit code, or if there is any +// other error related to starting the temporary container. +// +// There are five different ways to configure the hook. As an example, all forms +// below are equivalent and will execute `rake test --verbose`. +// +// 1. 
Shell script: +// +// "postCommit": { +// "script": "rake test --verbose", +// } +// +// The above is a convenient form which is equivalent to: +// +// "postCommit": { +// "command": ["/bin/sh", "-ic"], +// "args": ["rake test --verbose"] +// } +// +// 2. A command as the image entrypoint: +// +// "postCommit": { +// "command": ["rake", "test", "--verbose"] +// } +// +// Command overrides the image entrypoint in the exec form, as documented in +// Docker: https://docs.docker.com/engine/reference/builder/#entrypoint. +// +// 3. Pass arguments to the default entrypoint: +// +// "postCommit": { +// "args": ["rake", "test", "--verbose"] +// } +// +// This form is only useful if the image entrypoint can handle arguments. +// +// 4. Shell script with arguments: +// +// "postCommit": { +// "script": "rake test $1", +// "args": ["--verbose"] +// } +// +// This form is useful if you need to pass arguments that would otherwise be +// hard to quote properly in the shell script. In the script, $0 will be +// "/bin/sh" and $1, $2, etc., are the positional arguments from Args. +// +// 5. Command with arguments: +// +// "postCommit": { +// "command": ["rake", "test"], +// "args": ["--verbose"] +// } +// +// This form is equivalent to appending the arguments to the Command slice. +// +// It is invalid to provide both Script and Command simultaneously. If none of +// the fields are specified, the hook is not executed. +message BuildPostCommitSpec { + // command is the command to run. It may not be specified with Script. + // This might be needed if the image doesn't have `/bin/sh`, or if you + // do not want to use a shell. In all other cases, using Script might be + // more convenient. + repeated string command = 1; + + // args is a list of arguments that are provided to either Command, + // Script or the container image's default entrypoint. The arguments are + // placed immediately after the command to be run. + repeated string args = 2; + + // script is a shell script to be run with `/bin/sh -ic`. It may not be + // specified with Command. Use Script when a shell script is appropriate + // to execute the post build hook, for example for running unit tests + // with `rake test`. If you need control over the image entrypoint, or + // if the image does not have `/bin/sh`, use Command and/or Args. + // The `-i` flag is needed to support CentOS and RHEL images that use + // Software Collections (SCL), in order to have the appropriate + // collections enabled in the shell. E.g., in the Ruby image, this is + // necessary to make `ruby`, `bundle` and other binaries available in + // the PATH. + optional string script = 3; +} + +// BuildRequest is the resource used to pass parameters to build generator +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message BuildRequest { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // revision is the information from the source for a specific repo snapshot. + optional SourceRevision revision = 2; + + // triggeredByImage is the Image that triggered this build. + optional k8s.io.api.core.v1.ObjectReference triggeredByImage = 3; + + // from is the reference to the ImageStreamTag that triggered the build.
+ optional k8s.io.api.core.v1.ObjectReference from = 4; + + // binary indicates a request to build from a binary provided to the builder + optional BinaryBuildSource binary = 5; + + // lastVersion (optional) is the LastVersion of the BuildConfig that was used + // to generate the build. If the BuildConfig in the generator doesn't match, a build will + // not be generated. + optional int64 lastVersion = 6; + + // env contains additional environment variables you want to pass into a builder container. + repeated k8s.io.api.core.v1.EnvVar env = 7; + + // triggeredBy describes which triggers started the most recent update to the + // build configuration and contains information about those triggers. + repeated BuildTriggerCause triggeredBy = 8; + + // DockerStrategyOptions contains additional docker-strategy specific options for the build + optional DockerStrategyOptions dockerStrategyOptions = 9; + + // SourceStrategyOptions contains additional source-strategy specific options for the build + optional SourceStrategyOptions sourceStrategyOptions = 10; +} + +// BuildSource is the SCM used for the build. +message BuildSource { + // type of build input to accept + // +k8s:conversion-gen=false + // +optional + optional string type = 1; + + // binary builds accept a binary as their input. The binary is generally assumed to be a tar, + // gzipped tar, or zip file depending on the strategy. For container image builds, this is the build + // context and an optional Dockerfile may be specified to override any Dockerfile in the + // build context. For Source builds, this is assumed to be an archive as described above. For + // Source and container image builds, if binary.asFile is set the build will receive a directory with + // a single file. contextDir may be used when an archive is provided. Custom builds will + // receive this binary as input on STDIN. + optional BinaryBuildSource binary = 2; + + // dockerfile is the raw contents of a Dockerfile which should be built. When this option is + // specified, the FROM may be modified based on your strategy base image and additional ENV + // stanzas from your strategy environment will be added after the FROM, but before the rest + // of your Dockerfile stanzas. The Dockerfile source type may be used with other options like + // git - in those cases the Git repo will have any innate Dockerfile replaced in the context + // dir. + optional string dockerfile = 3; + + // git contains optional information about git build source + optional GitBuildSource git = 4; + + // images describes a set of images to be used to provide source for the build + repeated ImageSource images = 5; + + // contextDir specifies the sub-directory where the source code for the application exists. + // This allows sources to be built from a directory other than the root of + // the repository. + optional string contextDir = 6; + + // sourceSecret is the name of a Secret that would be used for setting + // up the authentication for cloning a private repository. + // The secret contains valid credentials for the remote repository, where the + // data's key represents the authentication method to be used and value is + // the base64 encoded credentials. Supported auth methods are: ssh-privatekey. + optional k8s.io.api.core.v1.LocalObjectReference sourceSecret = 7; + + // secrets represents a list of secrets and their destinations that will + // be used only for the build.
+ repeated SecretBuildSource secrets = 8; + + // configMaps represents a list of configMaps and their destinations that will + // be used for the build. + repeated ConfigMapBuildSource configMaps = 9; +} + +// BuildSpec has the information to represent a build and also additional +// information about a build +message BuildSpec { + // CommonSpec is the information that represents a build + optional CommonSpec commonSpec = 1; + + // triggeredBy describes which triggers started the most recent update to the + // build configuration and contains information about those triggers. + repeated BuildTriggerCause triggeredBy = 2; +} + +// BuildStatus contains the status of a build +message BuildStatus { + // phase is the point in the build lifecycle. Possible values are + // "New", "Pending", "Running", "Complete", "Failed", "Error", and "Cancelled". + optional string phase = 1; + + // cancelled describes if a cancel event was triggered for the build. + optional bool cancelled = 2; + + // reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI. + optional string reason = 3; + + // message is a human-readable message indicating details about why the build has this status. + optional string message = 4; + + // startTimestamp is a timestamp representing the server time when this Build started + // running in a Pod. + // It is represented in RFC3339 form and is in UTC. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTimestamp = 5; + + // completionTimestamp is a timestamp representing the server time when this Build was + // finished, whether that build failed or succeeded. It reflects the time at which + // the Pod running the Build terminated. + // It is represented in RFC3339 form and is in UTC. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTimestamp = 6; + + // duration contains a time.Duration object describing build time. + optional int64 duration = 7; + + // outputDockerImageReference contains a reference to the container image that + // will be built by this build. Its value is computed from + // Build.Spec.Output.To, and should include the registry address, so that + // it can be used to push and pull the image. + optional string outputDockerImageReference = 8; + + // config is an ObjectReference to the BuildConfig this Build is based on. + optional k8s.io.api.core.v1.ObjectReference config = 9; + + // output describes the container image the build has produced. + optional BuildStatusOutput output = 10; + + // stages contains details about each stage that occurs during the build + // including start time, duration (in milliseconds), and the steps that + // occurred within each stage. + repeated StageInfo stages = 11; + + // logSnippet is the last few lines of the build log. This value is only set for builds that failed. + optional string logSnippet = 12; + + // Conditions represents the latest available observations of a build's current state. + // +patchMergeKey=type + // +patchStrategy=merge + repeated BuildCondition conditions = 13; +} + +// BuildStatusOutput contains the status of the built image. +message BuildStatusOutput { + // to describes the status of the built image being pushed to a registry. + optional BuildStatusOutputTo to = 1; +}
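Since each StageInfo carries its duration as a millisecond count (as encoded by the StageInfo/StepInfo Unmarshal methods earlier in this patch), total build time can be recovered from status alone. A minimal sketch against the vendored types (illustrative only, not part of the patch):

package main

import (
	"fmt"
	"time"

	buildv1 "github.com/openshift/api/build/v1"
)

// totalStageTime sums the per-stage durations reported in status.stages;
// each StageInfo also nests per-step timings in its Steps slice.
func totalStageTime(stages []buildv1.StageInfo) time.Duration {
	var ms int64
	for _, s := range stages {
		ms += s.DurationMilliseconds
	}
	return time.Duration(ms) * time.Millisecond
}

func main() {
	stages := []buildv1.StageInfo{
		{Name: "FetchInputs", DurationMilliseconds: 1200},
		{Name: "Build", DurationMilliseconds: 58000},
	}
	fmt.Println(totalStageTime(stages)) // 59.2s
}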
+ +// BuildStatusOutputTo describes the status of the built image with regard to the +// image registry to which it was supposed to be pushed. +message BuildStatusOutputTo { + // imageDigest is the digest of the built container image. The digest uniquely + // identifies the image in the registry to which it was pushed. + // + // Please note that this field may not always be set even if the push + // completes successfully - e.g. when the registry returns no digest or + // returns it in a format that the builder doesn't understand. + optional string imageDigest = 1; +} + +// BuildStrategy contains the details of how to perform a build. +message BuildStrategy { + // type is the kind of build strategy. + // +k8s:conversion-gen=false + // +optional + optional string type = 1; + + // dockerStrategy holds the parameters to the container image build strategy. + optional DockerBuildStrategy dockerStrategy = 2; + + // sourceStrategy holds the parameters to the Source build strategy. + optional SourceBuildStrategy sourceStrategy = 3; + + // customStrategy holds the parameters to the Custom build strategy + optional CustomBuildStrategy customStrategy = 4; + + // JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. + // Deprecated: use OpenShift Pipelines + optional JenkinsPipelineBuildStrategy jenkinsPipelineStrategy = 5; +} + +// BuildTriggerCause holds information about a triggered build. It is used for +// displaying build trigger data for each build and build configuration in oc +// describe. It is also used to describe which triggers led to the most recent +// update in the build configuration. +message BuildTriggerCause { + // message is used to store a human readable message for why the build was + // triggered. E.g.: "Manually triggered by user", "Configuration change", etc. + optional string message = 1; + + // genericWebHook holds data about a build's generic webhook trigger. + optional GenericWebHookCause genericWebHook = 2; + + // gitHubWebHook represents data for a GitHub webhook that fired a + // specific build. + optional GitHubWebHookCause githubWebHook = 3; + + // imageChangeBuild stores information about an imagechange event + // that triggered a new build. + optional ImageChangeCause imageChangeBuild = 4; + + // GitLabWebHook represents data for a GitLab webhook that fired a specific + // build. + optional GitLabWebHookCause gitlabWebHook = 5; + + // BitbucketWebHook represents data for a Bitbucket webhook that fired a + // specific build. + optional BitbucketWebHookCause bitbucketWebHook = 6; +} + +// BuildTriggerPolicy describes a policy for a single trigger that results in a new Build. +message BuildTriggerPolicy { + // type is the type of build trigger.
Valid values: + // + // - GitHub + // GitHubWebHookBuildTriggerType represents a trigger that launches builds on + // GitHub webhook invocations + // + // - Generic + // GenericWebHookBuildTriggerType represents a trigger that launches builds on + // generic webhook invocations + // + // - GitLab + // GitLabWebHookBuildTriggerType represents a trigger that launches builds on + // GitLab webhook invocations + // + // - Bitbucket + // BitbucketWebHookBuildTriggerType represents a trigger that launches builds on + // Bitbucket webhook invocations + // + // - ImageChange + // ImageChangeBuildTriggerType represents a trigger that launches builds on + // availability of a new version of an image + // + // - ConfigChange + // ConfigChangeBuildTriggerType will trigger a build on an initial build config creation + // WARNING: In the future the behavior will change to trigger a build on any config change + optional string type = 1; + + // github contains the parameters for a GitHub webhook type of trigger + optional WebHookTrigger github = 2; + + // generic contains the parameters for a Generic webhook type of trigger + optional WebHookTrigger generic = 3; + + // imageChange contains parameters for an ImageChange type of trigger + optional ImageChangeTrigger imageChange = 4; + + // GitLabWebHook contains the parameters for a GitLab webhook type of trigger + optional WebHookTrigger gitlab = 5; + + // BitbucketWebHook contains the parameters for a Bitbucket webhook type of + // trigger + optional WebHookTrigger bitbucket = 6; +} + +// BuildVolume describes a volume that is made available to build pods, +// such that it can be mounted into buildah's runtime environment. +// Only a subset of Kubernetes Volume sources are supported. +message BuildVolume { + // name is a unique identifier for this BuildVolume. + // It must conform to the Kubernetes DNS label standard and be unique within the pod. + // Names that collide with those added by the build controller will result in a + // failed build with an error message detailing which name caused the error. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + // +required + optional string name = 1; + + // source represents the location and type of the mounted volume. + // +required + optional BuildVolumeSource source = 2; + + // mounts represents the location of the volume in the image build container + // +required + // +listType=map + // +listMapKey=destinationPath + // +patchMergeKey=destinationPath + // +patchStrategy=merge + repeated BuildVolumeMount mounts = 3; +} + +// BuildVolumeMount describes the mounting of a Volume within buildah's runtime environment. +message BuildVolumeMount { + // destinationPath is the path within the buildah runtime environment at which the volume should be mounted. + // The transient mount within the build image and the backing volume will both be mounted read only. + // Must be an absolute path, must not contain '..' or ':', and must not collide with a destination path generated + // by the builder process + // Paths that collide with those added by the build controller will result in a + // failed build with an error message detailing which path caused the error. + optional string destinationPath = 1; +} + +// BuildVolumeSource represents the source of a volume to mount +// Only one of its supported types may be specified at any given time. +message BuildVolumeSource { + // type is the BuildVolumeSourceType for the volume source. 
+ // Type must match the populated volume source. + // Valid types are: Secret, ConfigMap + optional string type = 1; + + // secret represents a Secret that should populate this volume. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + // +optional + optional k8s.io.api.core.v1.SecretVolumeSource secret = 2; + + // configMap represents a ConfigMap that should populate this volume + // +optional + optional k8s.io.api.core.v1.ConfigMapVolumeSource configMap = 3; + + // csi represents ephemeral storage provided by external CSI drivers which support this capability + // +optional + optional k8s.io.api.core.v1.CSIVolumeSource csi = 4; +} + +// CommonSpec encapsulates all the inputs necessary to represent a build. +message CommonSpec { + // serviceAccount is the name of the ServiceAccount to use to run the pod + // created by this build. + // The pod will be allowed to use secrets referenced by the ServiceAccount + optional string serviceAccount = 1; + + // source describes the SCM in use. + optional BuildSource source = 2; + + // revision is the information from the source for a specific repo snapshot. + // This is optional. + optional SourceRevision revision = 3; + + // strategy defines how to perform a build. + optional BuildStrategy strategy = 4; + + // output describes the container image the Strategy should produce. + optional BuildOutput output = 5; + + // resources computes resource requirements to execute the build. + optional k8s.io.api.core.v1.ResourceRequirements resources = 6; + + // postCommit is a build hook executed after the build output image is + // committed, before it is pushed to a registry. + optional BuildPostCommitSpec postCommit = 7; + + // completionDeadlineSeconds is an optional duration in seconds, counted from + // the time when a build pod gets scheduled in the system, that the build may + // be active on a node before the system actively tries to terminate the + // build; value must be positive integer + optional int64 completionDeadlineSeconds = 8; + + // nodeSelector is a selector which must be true for the build pod to fit on a node + // If nil, it can be overridden by default build nodeselector values for the cluster. + // If set to an empty map or a map with any values, default build nodeselector values + // are ignored. + // +optional + optional OptionalNodeSelector nodeSelector = 9; + + // mountTrustedCA bind mounts the cluster's trusted certificate authorities, as defined in + // the cluster's proxy configuration, into the build. This lets processes within a build trust + // components signed by custom PKI certificate authorities, such as private artifact + // repositories and HTTPS proxies. + // + // When this field is set to true, the contents of `/etc/pki/ca-trust` within the build are + // managed by the build container, and any changes to this directory or its subdirectories (for + // example - within a Dockerfile `RUN` instruction) are not persisted in the build's output image. + optional bool mountTrustedCA = 10; +} + +// CommonWebHookCause factors out the identical format of these webhook +// causes into struct so we can share it in the specific causes; it is too late for +// GitHub and Generic but we can leverage this pattern with GitLab and Bitbucket. +message CommonWebHookCause { + // Revision is the git source revision information of the trigger. + optional SourceRevision revision = 1; + + // Secret is the obfuscated webhook secret that triggered a build. 
+ optional string secret = 2; +} + +// ConfigMapBuildSource describes a configmap and its destination directory that will be +// used only at build time. The content of the configmap referenced here will +// be copied into the destination directory instead of mounting. +message ConfigMapBuildSource { + // configMap is a reference to an existing configmap that you want to use in your + // build. + optional k8s.io.api.core.v1.LocalObjectReference configMap = 1; + + // destinationDir is the directory where the files from the configmap should be + // available for the build time. + // For the Source build strategy, these will be injected into a container + // where the assemble script runs. + // For the container image build strategy, these will be copied into the build + // directory, where the Dockerfile is located, so users can ADD or COPY them + // during container image build. + optional string destinationDir = 2; +} + +// CustomBuildStrategy defines input parameters specific to Custom build. +message CustomBuildStrategy { + // from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which + // the container image should be pulled + optional k8s.io.api.core.v1.ObjectReference from = 1; + + // pullSecret is the name of a Secret that would be used for setting up + // the authentication for pulling the container images from the private Docker + // registries + optional k8s.io.api.core.v1.LocalObjectReference pullSecret = 2; + + // env contains additional environment variables you want to pass into a builder container. + repeated k8s.io.api.core.v1.EnvVar env = 3; + + // exposeDockerSocket will allow running Docker commands (and build container images) from + // inside the container. + // TODO: Allow admins to enforce 'false' for this option + optional bool exposeDockerSocket = 4; + + // forcePull describes if the controller should configure the build pod to always pull the images + // for the builder or only pull if it is not present locally + optional bool forcePull = 5; + + // secrets is a list of additional secrets that will be included in the build pod + repeated SecretSpec secrets = 6; + + // buildAPIVersion is the requested API version for the Build object serialized and passed to the custom builder + optional string buildAPIVersion = 7; +} + +// DockerBuildStrategy defines input parameters specific to container image build. +message DockerBuildStrategy { + // from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage which overrides + // the FROM image in the Dockerfile for the build. If the Dockerfile uses multi-stage builds, + // this will replace the image in the last FROM directive of the file. + optional k8s.io.api.core.v1.ObjectReference from = 1; + + // pullSecret is the name of a Secret that would be used for setting up + // the authentication for pulling the container images from the private Docker + // registries + optional k8s.io.api.core.v1.LocalObjectReference pullSecret = 2; + + // noCache if set to true indicates that the container image build must be executed with the + // --no-cache=true flag + optional bool noCache = 3; + + // env contains additional environment variables you want to pass into a builder container. + repeated k8s.io.api.core.v1.EnvVar env = 4; + + // forcePull describes if the builder should pull the images from the registry prior to building.
+ optional bool forcePull = 5; + + // dockerfilePath is the path of the Dockerfile that will be used to build the container image, + // relative to the root of the context (contextDir). + // Defaults to `Dockerfile` if unset. + optional string dockerfilePath = 6; + + // buildArgs contains build arguments that will be resolved in the Dockerfile. See + // https://docs.docker.com/engine/reference/builder/#/arg for more details. + // NOTE: Only the 'name' and 'value' fields are supported. Any settings on the 'valueFrom' field + // are ignored. + repeated k8s.io.api.core.v1.EnvVar buildArgs = 7; + + // imageOptimizationPolicy describes what optimizations the system can use when building images + // to reduce the final size or time spent building the image. The default policy is 'None' which + // means the final build image will be equivalent to an image created by the container image build API. + // The experimental policy 'SkipLayers' will avoid committing new layers in between each + // image step, and will fail if the Dockerfile cannot provide compatibility with the 'None' + // policy. An additional experimental policy 'SkipLayersAndWarn' is the same as + // 'SkipLayers' but simply warns if compatibility cannot be preserved. + optional string imageOptimizationPolicy = 8; + + // volumes is a list of input volumes that can be mounted into the build's runtime environment. + // Only a subset of Kubernetes Volume sources are supported by builds. + // More info: https://kubernetes.io/docs/concepts/storage/volumes + // +listType=map + // +listMapKey=name + // +patchMergeKey=name + // +patchStrategy=merge + repeated BuildVolume volumes = 9; +} + +// DockerStrategyOptions contains extra strategy options for container image builds +message DockerStrategyOptions { + // Args contains any build arguments that are to be passed to Docker. See + // https://docs.docker.com/engine/reference/builder/#/arg for more details + repeated k8s.io.api.core.v1.EnvVar buildArgs = 1; + + // noCache overrides the docker-strategy noCache option in the build config + optional bool noCache = 2; +} + +// GenericWebHookCause holds information about a generic WebHook that +// triggered a build. +message GenericWebHookCause { + // revision is an optional field that stores the git source revision + // information of the generic webhook trigger when it is available. + optional SourceRevision revision = 1; + + // secret is the obfuscated webhook secret that triggered a build. + optional string secret = 2; +} + +// GenericWebHookEvent is the payload expected for a generic webhook post +message GenericWebHookEvent { + // type is the type of source repository + // +k8s:conversion-gen=false + optional string type = 1; + + // git is the git information if the Type is BuildSourceGit + optional GitInfo git = 2; + + // env contains additional environment variables you want to pass into a builder container. + // ValueFrom is not supported. + repeated k8s.io.api.core.v1.EnvVar env = 3; + + // DockerStrategyOptions contains additional docker-strategy specific options for the build + optional DockerStrategyOptions dockerStrategyOptions = 4; +} + +// GitBuildSource defines the parameters of a Git SCM +message GitBuildSource { + // uri points to the source that will be built. The structure of the source + // will depend on the type of build to run + optional string uri = 1; + + // ref is the branch/tag/ref to build. + optional string ref = 2; + + // proxyConfig defines the proxies to use for the git clone operation.
Values + // not set here are inherited from cluster-wide build git proxy settings. + optional ProxyConfig proxyConfig = 3; +} + +// GitHubWebHookCause has information about a GitHub webhook that triggered a +// build. +message GitHubWebHookCause { + // revision is the git revision information of the trigger. + optional SourceRevision revision = 1; + + // secret is the obfuscated webhook secret that triggered a build. + optional string secret = 2; +} + +// GitInfo is the aggregated git information for a generic webhook post +message GitInfo { + optional GitBuildSource gitBuildSource = 1; + + optional GitSourceRevision gitSourceRevision = 2; + + // Refs is a list of GitRefs for the provided repo - generally sent + // when used from a post-receive hook. This field is optional and is + // used when sending multiple refs + repeated GitRefInfo refs = 3; +} + +// GitLabWebHookCause has information about a GitLab webhook that triggered a +// build. +message GitLabWebHookCause { + optional CommonWebHookCause commonSpec = 1; +} + +// GitRefInfo is a single ref +message GitRefInfo { + optional GitBuildSource gitBuildSource = 1; + + optional GitSourceRevision gitSourceRevision = 2; +} + +// GitSourceRevision is the commit information from a git source for a build +message GitSourceRevision { + // commit is the commit hash identifying a specific commit + optional string commit = 1; + + // author is the author of a specific commit + optional SourceControlUser author = 2; + + // committer is the committer of a specific commit + optional SourceControlUser committer = 3; + + // message is the description of a specific commit + optional string message = 4; +} + +// ImageChangeCause contains information about the image that triggered a +// build +message ImageChangeCause { + // imageID is the ID of the image that triggered a new build. + optional string imageID = 1; + + // fromRef contains detailed information about an image that triggered a + // build. + optional k8s.io.api.core.v1.ObjectReference fromRef = 2; +} + +// ImageChangeTrigger allows builds to be triggered when an ImageStream changes +message ImageChangeTrigger { + // lastTriggeredImageID is used internally by the ImageChangeController to save last + // used image ID for build + // This field is deprecated and will be removed in a future release. + // Deprecated + optional string lastTriggeredImageID = 1; + + // from is a reference to an ImageStreamTag that will trigger a build when updated + // It is optional. If no From is specified, the From image from the build strategy + // will be used. Only one ImageChangeTrigger with an empty From reference is allowed in + // a build configuration. + optional k8s.io.api.core.v1.ObjectReference from = 2; + + // paused is true if this trigger is temporarily disabled. Optional. + optional bool paused = 3; +} + +// ImageChangeTriggerStatus tracks the latest resolved status of the associated ImageChangeTrigger policy +// specified in the BuildConfigSpec.Triggers struct. +message ImageChangeTriggerStatus { + // lastTriggeredImageID represents the sha/id of the ImageStreamTag when a Build for this BuildConfig was started. + // The lastTriggeredImageID is updated each time a Build for this BuildConfig is started, even if this ImageStreamTag is not the reason the Build is started. + optional string lastTriggeredImageID = 1; + + // from is the ImageStreamTag that is the source of the trigger. 
+ optional ImageStreamTagReference from = 2; + + // lastTriggerTime is the last time this particular ImageStreamTag triggered a Build to start. + // This field is only updated when this trigger specifically started a Build. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTriggerTime = 3; +} + +// ImageLabel represents a label applied to the resulting image. +message ImageLabel { + // name defines the name of the label. It must have non-zero length. + optional string name = 1; + + // value defines the literal value of the label. + optional string value = 2; +} + +// ImageSource is used to describe build source that will be extracted from an image or used during a +// multi stage build. A reference of type ImageStreamTag, ImageStreamImage or DockerImage may be used. +// A pull secret can be specified to pull the image from an external registry or override the default +// service account secret if pulling from the internal registry. Image sources can either be used to +// extract content from an image and place it into the build context along with the repository source, +// or used directly during a multi-stage container image build to allow content to be copied without overwriting +// the contents of the repository source (see the 'paths' and 'as' fields). +message ImageSource { + // from is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to + // copy source from. + optional k8s.io.api.core.v1.ObjectReference from = 1; + + // A list of image names that this source will be used in place of during a multi-stage container image + // build. For instance, a Dockerfile that uses "COPY --from=nginx:latest" will first check for an image + // source that has "nginx:latest" in this field before attempting to pull directly. If the Dockerfile + // does not reference an image source it is ignored. This field and paths may both be set, in which case + // the contents will be used twice. + // +optional + repeated string as = 4; + + // paths is a list of source and destination paths to copy from the image. This content will be copied + // into the build context prior to starting the build. If no paths are set, the build context will + // not be altered. + // +optional + repeated ImageSourcePath paths = 2; + + // pullSecret is a reference to a secret to be used to pull the image from a registry + // If the image is pulled from the OpenShift registry, this field does not need to be set. + optional k8s.io.api.core.v1.LocalObjectReference pullSecret = 3; +} + +// ImageSourcePath describes a path to be copied from a source image and its destination within the build directory. +message ImageSourcePath { + // sourcePath is the absolute path of the file or directory inside the image to + // copy to the build directory. If the source path ends in /. then the content of + // the directory will be copied, but the directory itself will not be created at the + // destination. + optional string sourcePath = 1; + + // destinationDir is the relative directory within the build directory + // where files copied from the image are placed. + optional string destinationDir = 2; +} + +// ImageStreamTagReference references the ImageStreamTag in an image change trigger by namespace and name. 
+message ImageStreamTagReference {
+  // namespace is the namespace where the ImageStreamTag for an ImageChangeTrigger is located
+  optional string namespace = 1;
+
+  // name is the name of the ImageStreamTag for an ImageChangeTrigger
+  optional string name = 2;
+}
+
+// JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build.
+// Deprecated: use OpenShift Pipelines
+message JenkinsPipelineBuildStrategy {
+  // JenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline
+  // relative to the root of the context (contextDir). If both JenkinsfilePath & Jenkinsfile are
+  // not specified, this defaults to Jenkinsfile in the root of the specified contextDir.
+  optional string jenkinsfilePath = 1;
+
+  // Jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.
+  optional string jenkinsfile = 2;
+
+  // env contains additional environment variables you want to pass into a build pipeline.
+  repeated k8s.io.api.core.v1.EnvVar env = 3;
+}
+
+// OptionalNodeSelector is a map that may also be left nil to distinguish between set and unset.
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message OptionalNodeSelector {
+  // items, if empty, will result in an empty map
+
+  map<string, string> items = 1;
+}
+
+// ProxyConfig defines what proxies to use for an operation
+message ProxyConfig {
+  // httpProxy is a proxy used to reach the git repository over http
+  optional string httpProxy = 3;
+
+  // httpsProxy is a proxy used to reach the git repository over https
+  optional string httpsProxy = 4;
+
+  // noProxy is the list of domains for which the proxy should not be used
+  optional string noProxy = 5;
+}
+
+// SecretBuildSource describes a secret and its destination directory that will be
+// used only at the build time. The content of the secret referenced here will
+// be copied into the destination directory instead of mounting.
+message SecretBuildSource {
+  // secret is a reference to an existing secret that you want to use in your
+  // build.
+  optional k8s.io.api.core.v1.LocalObjectReference secret = 1;
+
+  // destinationDir is the directory where the files from the secret should be
+  // available for the build time.
+  // For the Source build strategy, these will be injected into a container
+  // where the assemble script runs. Later, when the script finishes, all files
+  // injected will be truncated to zero length.
+  // For the container image build strategy, these will be copied into the build
+  // directory, where the Dockerfile is located, so users can ADD or COPY them
+  // during container image build.
+  optional string destinationDir = 2;
+}
+
+// SecretLocalReference contains information that points to the local secret being used
+message SecretLocalReference {
+  // Name is the name of the resource in the same namespace being referenced
+  optional string name = 1;
+}
+
+// SecretSpec specifies a secret to be included in a build pod and its corresponding mount point
+message SecretSpec {
+  // secretSource is a reference to the secret
+  optional k8s.io.api.core.v1.LocalObjectReference secretSource = 1;
+
+  // mountPath is the path at which to mount the secret
+  optional string mountPath = 2;
+}
+
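OptionalNodeSelector deliberately distinguishes a nil map from an empty one. A short Go sketch of how a consumer might branch on that distinction; the helper is illustrative, not part of this patch:

package example

import buildv1 "github.com/openshift/api/build/v1"

func describeNodeSelector(s buildv1.OptionalNodeSelector) string {
	switch {
	case s == nil:
		// Unset: cluster-default build node selectors may be applied.
		return "unset"
	case len(s) == 0:
		// Set but empty: cluster defaults are explicitly suppressed.
		return "set, empty"
	default:
		return "set"
	}
}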
+// SourceBuildStrategy defines input parameters specific to a Source build.
+message SourceBuildStrategy {
+  // from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which
+  // the container image should be pulled
+  optional k8s.io.api.core.v1.ObjectReference from = 1;
+
+  // pullSecret is the name of a Secret that would be used for setting up
+  // the authentication for pulling the container images from the private Docker
+  // registries
+  optional k8s.io.api.core.v1.LocalObjectReference pullSecret = 2;
+
+  // env contains additional environment variables you want to pass into a builder container.
+  repeated k8s.io.api.core.v1.EnvVar env = 3;
+
+  // scripts is the location of Source scripts
+  optional string scripts = 4;
+
+  // incremental flag forces the Source build to do incremental builds if true.
+  optional bool incremental = 5;
+
+  // forcePull describes if the builder should pull the images from registry prior to building.
+  optional bool forcePull = 6;
+
+  // volumes is a list of input volumes that can be mounted into the build's runtime environment.
+  // Only a subset of Kubernetes Volume sources are supported by builds.
+  // More info: https://kubernetes.io/docs/concepts/storage/volumes
+  // +listType=map
+  // +listMapKey=name
+  // +patchMergeKey=name
+  // +patchStrategy=merge
+  repeated BuildVolume volumes = 9;
+}
+
+// SourceControlUser defines the identity of a user of source control
+message SourceControlUser {
+  // name of the source control user
+  optional string name = 1;
+
+  // email of the source control user
+  optional string email = 2;
+}
+
+// SourceRevision is the revision or commit information from the source for the build
+message SourceRevision {
+  // type of the build source, may be one of 'Source', 'Dockerfile', 'Binary', or 'Images'
+  // +k8s:conversion-gen=false
+  optional string type = 1;
+
+  // Git contains information about git-based build source
+  optional GitSourceRevision git = 2;
+}
+
+// SourceStrategyOptions contains extra strategy options for Source builds
+message SourceStrategyOptions {
+  // incremental overrides the source-strategy incremental option in the build config
+  optional bool incremental = 1;
+}
+
+// StageInfo contains details about a build stage.
+message StageInfo {
+  // name is a unique identifier for each build stage that occurs.
+  optional string name = 1;
+
+  // startTime is a timestamp representing the server time when this Stage started.
+  // It is represented in RFC3339 form and is in UTC.
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2;
+
+  // durationMilliseconds identifies how long the stage took
+  // to complete in milliseconds.
+  // Note: the duration of a stage can exceed the sum of the duration of the steps within
+  // the stage as not all actions are accounted for in explicit build steps.
+  optional int64 durationMilliseconds = 3;
+
+  // steps contains details about each step that occurs during a build stage
+  // including start time and duration in milliseconds.
+  repeated StepInfo steps = 4;
+}
+
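The register.go hunk below wires these types into a runtime.Scheme. A minimal Go sketch of installing them from consumer code, assuming only what register.go exports:

package example

import (
	buildv1 "github.com/openshift/api/build/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// newBuildScheme returns a scheme with build.openshift.io/v1 types registered.
func newBuildScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	// Install adds Build, BuildConfig, and the related list and options
	// types under the build.openshift.io/v1 GroupVersion.
	if err := buildv1.Install(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}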
+// StepInfo contains details about a build step.
+message StepInfo {
+  // name is a unique identifier for each build step.
+  optional string name = 1;
+
+  // startTime is a timestamp representing the server time when this Step started.
+  // it is represented in RFC3339 form and is in UTC.
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2;
+
+  // durationMilliseconds identifies how long the step took
+  // to complete in milliseconds.
+  optional int64 durationMilliseconds = 3;
+}
+
+// WebHookTrigger is a trigger that gets invoked using a webhook type of post
+message WebHookTrigger {
+  // secret used to validate requests.
+  // Deprecated: use SecretReference instead.
+  optional string secret = 1;
+
+  // allowEnv determines whether the webhook can set environment variables; can only
+  // be set to true for GenericWebHook.
+  optional bool allowEnv = 2;
+
+  // secretReference is a reference to a secret in the same namespace,
+  // containing the value to be validated when the webhook is invoked.
+  // The secret being referenced must contain a key named "WebHookSecretKey", the value
+  // of which will be checked against the value supplied in the webhook invocation.
+  optional SecretLocalReference secretReference = 3;
+}
+
diff --git a/vendor/github.com/openshift/api/build/v1/legacy.go b/vendor/github.com/openshift/api/build/v1/legacy.go
new file mode 100644
index 000000000..a74627d2c
--- /dev/null
+++ b/vendor/github.com/openshift/api/build/v1/legacy.go
@@ -0,0 +1,28 @@
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+	legacyGroupVersion            = schema.GroupVersion{Group: "", Version: "v1"}
+	legacySchemeBuilder           = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme)
+	DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme
+)
+
+func addLegacyKnownTypes(scheme *runtime.Scheme) error {
+	types := []runtime.Object{
+		&Build{},
+		&BuildList{},
+		&BuildConfig{},
+		&BuildConfigList{},
+		&BuildLog{},
+		&BuildRequest{},
+		&BuildLogOptions{},
+		&BinaryBuildRequestOptions{},
+	}
+	scheme.AddKnownTypes(legacyGroupVersion, types...)
+	return nil
+}
diff --git a/vendor/github.com/openshift/api/build/v1/register.go b/vendor/github.com/openshift/api/build/v1/register.go
new file mode 100644
index 000000000..16f68ea8c
--- /dev/null
+++ b/vendor/github.com/openshift/api/build/v1/register.go
@@ -0,0 +1,47 @@
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+	GroupName     = "build.openshift.io"
+	GroupVersion  = schema.GroupVersion{Group: GroupName, Version: "v1"}
+	schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme)
+	// Install is a function which adds this version to a scheme
+	Install = schemeBuilder.AddToScheme
+
+	// SchemeGroupVersion generated code relies on this name
+	// Deprecated
+	SchemeGroupVersion = GroupVersion
+	// AddToScheme exists solely to keep the old generators creating valid code
+	// DEPRECATED
+	AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+	return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+// addKnownTypes adds types to API group
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(GroupVersion,
+		&Build{},
+		&BuildList{},
+		&BuildConfig{},
+		&BuildConfigList{},
+		&BuildLog{},
+		&BuildRequest{},
+		&BuildLogOptions{},
+		&BinaryBuildRequestOptions{},
+		// This is needed for webhooks
+		&corev1.PodProxyOptions{},
+	)
+	metav1.AddToGroupVersion(scheme, GroupVersion)
+	return nil
+}
diff --git a/vendor/github.com/openshift/api/build/v1/types.go b/vendor/github.com/openshift/api/build/v1/types.go
new file mode 100644
index
000000000..ba836aad8 --- /dev/null +++ b/vendor/github.com/openshift/api/build/v1/types.go @@ -0,0 +1,1469 @@ +package v1 + +import ( + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:method=UpdateDetails,verb=update,subresource=details +// +genclient:method=Clone,verb=create,subresource=clone,input=BuildRequest +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Build encapsulates the inputs needed to produce a new deployable image, as well as +// the status of the execution and a reference to the Pod which executed the build. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Build struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec is all the inputs used to execute the build. + Spec BuildSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // status is the current status of the build. + // +optional + Status BuildStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// BuildSpec has the information to represent a build and also additional +// information about a build +type BuildSpec struct { + // CommonSpec is the information that represents a build + CommonSpec `json:",inline" protobuf:"bytes,1,opt,name=commonSpec"` + + // triggeredBy describes which triggers started the most recent update to the + // build configuration and contains information about those triggers. + TriggeredBy []BuildTriggerCause `json:"triggeredBy,omitempty" protobuf:"bytes,2,rep,name=triggeredBy"` +} + +// OptionalNodeSelector is a map that may also be left nil to distinguish between set and unset. +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type OptionalNodeSelector map[string]string + +func (t OptionalNodeSelector) String() string { + return fmt.Sprintf("%v", map[string]string(t)) +} + +// CommonSpec encapsulates all the inputs necessary to represent a build. +type CommonSpec struct { + // serviceAccount is the name of the ServiceAccount to use to run the pod + // created by this build. + // The pod will be allowed to use secrets referenced by the ServiceAccount + ServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,1,opt,name=serviceAccount"` + + // source describes the SCM in use. + Source BuildSource `json:"source,omitempty" protobuf:"bytes,2,opt,name=source"` + + // revision is the information from the source for a specific repo snapshot. + // This is optional. + Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,3,opt,name=revision"` + + // strategy defines how to perform a build. + Strategy BuildStrategy `json:"strategy" protobuf:"bytes,4,opt,name=strategy"` + + // output describes the container image the Strategy should produce. + Output BuildOutput `json:"output,omitempty" protobuf:"bytes,5,opt,name=output"` + + // resources computes resource requirements to execute the build. + Resources corev1.ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,6,opt,name=resources"` + + // postCommit is a build hook executed after the build output image is + // committed, before it is pushed to a registry. 
+	PostCommit BuildPostCommitSpec `json:"postCommit,omitempty" protobuf:"bytes,7,opt,name=postCommit"`
+
+	// completionDeadlineSeconds is an optional duration in seconds, counted from
+	// the time when a build pod gets scheduled in the system, that the build may
+	// be active on a node before the system actively tries to terminate the
+	// build; value must be positive integer
+	CompletionDeadlineSeconds *int64 `json:"completionDeadlineSeconds,omitempty" protobuf:"varint,8,opt,name=completionDeadlineSeconds"`
+
+	// nodeSelector is a selector which must be true for the build pod to fit on a node
+	// If nil, it can be overridden by default build nodeselector values for the cluster.
+	// If set to an empty map or a map with any values, default build nodeselector values
+	// are ignored.
+	// +optional
+	NodeSelector OptionalNodeSelector `json:"nodeSelector" protobuf:"bytes,9,name=nodeSelector"`
+
+	// mountTrustedCA bind mounts the cluster's trusted certificate authorities, as defined in
+	// the cluster's proxy configuration, into the build. This lets processes within a build trust
+	// components signed by custom PKI certificate authorities, such as private artifact
+	// repositories and HTTPS proxies.
+	//
+	// When this field is set to true, the contents of `/etc/pki/ca-trust` within the build are
+	// managed by the build container, and any changes to this directory or its subdirectories (for
+	// example - within a Dockerfile `RUN` instruction) are not persisted in the build's output image.
+	MountTrustedCA *bool `json:"mountTrustedCA,omitempty" protobuf:"varint,10,opt,name=mountTrustedCA"`
+}
+
+// BuildTriggerCause holds information about a triggered build. It is used for
+// displaying build trigger data for each build and build configuration in oc
+// describe. It is also used to describe which triggers led to the most recent
+// update in the build configuration.
+type BuildTriggerCause struct {
+	// message is used to store a human readable message for why the build was
+	// triggered. E.g.: "Manually triggered by user", "Configuration change", etc.
+	Message string `json:"message,omitempty" protobuf:"bytes,1,opt,name=message"`
+
+	// genericWebHook holds data about a build's generic webhook trigger.
+	GenericWebHook *GenericWebHookCause `json:"genericWebHook,omitempty" protobuf:"bytes,2,opt,name=genericWebHook"`
+
+	// gitHubWebHook represents data for a GitHub webhook that fired a
+	// specific build.
+	GitHubWebHook *GitHubWebHookCause `json:"githubWebHook,omitempty" protobuf:"bytes,3,opt,name=githubWebHook"`
+
+	// imageChangeBuild stores information about an imagechange event
+	// that triggered a new build.
+	ImageChangeBuild *ImageChangeCause `json:"imageChangeBuild,omitempty" protobuf:"bytes,4,opt,name=imageChangeBuild"`
+
+	// GitLabWebHook represents data for a GitLab webhook that fired a specific
+	// build.
+	GitLabWebHook *GitLabWebHookCause `json:"gitlabWebHook,omitempty" protobuf:"bytes,5,opt,name=gitlabWebHook"`
+
+	// BitbucketWebHook represents data for a Bitbucket webhook that fired a
+	// specific build.
+	BitbucketWebHook *BitbucketWebHookCause `json:"bitbucketWebHook,omitempty" protobuf:"bytes,6,opt,name=bitbucketWebHook"`
+}
+
+// GenericWebHookCause holds information about a generic WebHook that
+// triggered a build.
+type GenericWebHookCause struct {
+	// revision is an optional field that stores the git source revision
+	// information of the generic webhook trigger when it is available.
+ Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,1,opt,name=revision"` + + // secret is the obfuscated webhook secret that triggered a build. + Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"` +} + +// GitHubWebHookCause has information about a GitHub webhook that triggered a +// build. +type GitHubWebHookCause struct { + // revision is the git revision information of the trigger. + Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,1,opt,name=revision"` + + // secret is the obfuscated webhook secret that triggered a build. + Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"` +} + +// CommonWebHookCause factors out the identical format of these webhook +// causes into struct so we can share it in the specific causes; it is too late for +// GitHub and Generic but we can leverage this pattern with GitLab and Bitbucket. +type CommonWebHookCause struct { + // Revision is the git source revision information of the trigger. + Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,1,opt,name=revision"` + + // Secret is the obfuscated webhook secret that triggered a build. + Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"` +} + +// GitLabWebHookCause has information about a GitLab webhook that triggered a +// build. +type GitLabWebHookCause struct { + CommonWebHookCause `json:",inline" protobuf:"bytes,1,opt,name=commonSpec"` +} + +// BitbucketWebHookCause has information about a Bitbucket webhook that triggered a +// build. +type BitbucketWebHookCause struct { + CommonWebHookCause `json:",inline" protobuf:"bytes,1,opt,name=commonSpec"` +} + +// ImageChangeCause contains information about the image that triggered a +// build +type ImageChangeCause struct { + // imageID is the ID of the image that triggered a new build. + ImageID string `json:"imageID,omitempty" protobuf:"bytes,1,opt,name=imageID"` + + // fromRef contains detailed information about an image that triggered a + // build. + FromRef *corev1.ObjectReference `json:"fromRef,omitempty" protobuf:"bytes,2,opt,name=fromRef"` +} + +// BuildStatus contains the status of a build +type BuildStatus struct { + // phase is the point in the build lifecycle. Possible values are + // "New", "Pending", "Running", "Complete", "Failed", "Error", and "Cancelled". + Phase BuildPhase `json:"phase" protobuf:"bytes,1,opt,name=phase,casttype=BuildPhase"` + + // cancelled describes if a cancel event was triggered for the build. + Cancelled bool `json:"cancelled,omitempty" protobuf:"varint,2,opt,name=cancelled"` + + // reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI. + Reason StatusReason `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason,casttype=StatusReason"` + + // message is a human-readable message indicating details about why the build has this status. + Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` + + // startTimestamp is a timestamp representing the server time when this Build started + // running in a Pod. + // It is represented in RFC3339 form and is in UTC. + StartTimestamp *metav1.Time `json:"startTimestamp,omitempty" protobuf:"bytes,5,opt,name=startTimestamp"` + + // completionTimestamp is a timestamp representing the server time when this Build was + // finished, whether that build failed or succeeded. It reflects the time at which + // the Pod running the Build terminated. 
+	// It is represented in RFC3339 form and is in UTC.
+	CompletionTimestamp *metav1.Time `json:"completionTimestamp,omitempty" protobuf:"bytes,6,opt,name=completionTimestamp"`
+
+	// duration contains time.Duration object describing build time.
+	Duration time.Duration `json:"duration,omitempty" protobuf:"varint,7,opt,name=duration,casttype=time.Duration"`
+
+	// outputDockerImageReference contains a reference to the container image that
+	// will be built by this build. Its value is computed from
+	// Build.Spec.Output.To, and should include the registry address, so that
+	// it can be used to push and pull the image.
+	OutputDockerImageReference string `json:"outputDockerImageReference,omitempty" protobuf:"bytes,8,opt,name=outputDockerImageReference"`
+
+	// config is an ObjectReference to the BuildConfig this Build is based on.
+	Config *corev1.ObjectReference `json:"config,omitempty" protobuf:"bytes,9,opt,name=config"`
+
+	// output describes the container image the build has produced.
+	Output BuildStatusOutput `json:"output,omitempty" protobuf:"bytes,10,opt,name=output"`
+
+	// stages contains details about each stage that occurs during the build
+	// including start time, duration (in milliseconds), and the steps that
+	// occurred within each stage.
+	Stages []StageInfo `json:"stages,omitempty" protobuf:"bytes,11,opt,name=stages"`
+
+	// logSnippet is the last few lines of the build log. This value is only set for builds that failed.
+	LogSnippet string `json:"logSnippet,omitempty" protobuf:"bytes,12,opt,name=logSnippet"`
+
+	// Conditions represents the latest available observations of a build's current state.
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []BuildCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,13,rep,name=conditions"`
+}
+
+// StageInfo contains details about a build stage.
+type StageInfo struct {
+	// name is a unique identifier for each build stage that occurs.
+	Name StageName `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+
+	// startTime is a timestamp representing the server time when this Stage started.
+	// It is represented in RFC3339 form and is in UTC.
+	StartTime metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"`
+
+	// durationMilliseconds identifies how long the stage took
+	// to complete in milliseconds.
+	// Note: the duration of a stage can exceed the sum of the duration of the steps within
+	// the stage as not all actions are accounted for in explicit build steps.
+	DurationMilliseconds int64 `json:"durationMilliseconds,omitempty" protobuf:"varint,3,opt,name=durationMilliseconds"`
+
+	// steps contains details about each step that occurs during a build stage
+	// including start time and duration in milliseconds.
+	Steps []StepInfo `json:"steps,omitempty" protobuf:"bytes,4,opt,name=steps"`
+}
+
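A short Go sketch of reading these timings back from a Build's status with the types above; the build value is assumed to come from an earlier API call:

package example

import (
	"fmt"

	buildv1 "github.com/openshift/api/build/v1"
)

// printStageTimings walks the recorded stages and their steps.
func printStageTimings(build *buildv1.Build) {
	for _, stage := range build.Status.Stages {
		fmt.Printf("stage %s took %dms\n", stage.Name, stage.DurationMilliseconds)
		for _, step := range stage.Steps {
			fmt.Printf("  step %s took %dms\n", step.Name, step.DurationMilliseconds)
		}
	}
}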
+// StageName is the unique identifier for each build stage.
+type StageName string
+
+// Valid values for StageName
+const (
+	// StageFetchInputs fetches any inputs such as source code.
+	StageFetchInputs StageName = "FetchInputs"
+
+	// StagePullImages pulls any images that are needed such as
+	// base images or input images.
+	StagePullImages StageName = "PullImages"
+
+	// StageBuild performs the steps necessary to build the image.
+	StageBuild StageName = "Build"
+
+	// StagePostCommit executes any post commit steps.
+	StagePostCommit StageName = "PostCommit"
+
+	// StagePushImage pushes the image to the node.
+	StagePushImage StageName = "PushImage"
+)
+
+// StepInfo contains details about a build step.
+type StepInfo struct {
+	// name is a unique identifier for each build step.
+	Name StepName `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
+
+	// startTime is a timestamp representing the server time when this Step started.
+	// it is represented in RFC3339 form and is in UTC.
+	StartTime metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"`
+
+	// durationMilliseconds identifies how long the step took
+	// to complete in milliseconds.
+	DurationMilliseconds int64 `json:"durationMilliseconds,omitempty" protobuf:"varint,3,opt,name=durationMilliseconds"`
+}
+
+// StepName is a unique identifier for each build step.
+type StepName string
+
+// Valid values for StepName
+const (
+	// StepExecPostCommitHook executes the buildconfigs post commit hook.
+	StepExecPostCommitHook StepName = "RunPostCommitHook"
+
+	// StepFetchGitSource fetches source code for the build.
+	StepFetchGitSource StepName = "FetchGitSource"
+
+	// StepPullBaseImage pulls a base image for the build.
+	StepPullBaseImage StepName = "PullBaseImage"
+
+	// StepPullInputImage pulls an input image for the build.
+	StepPullInputImage StepName = "PullInputImage"
+
+	// StepPushImage pushes an image to the registry.
+	StepPushImage StepName = "PushImage"
+
+	// StepPushDockerImage pushes a container image to the registry.
+	StepPushDockerImage StepName = "PushDockerImage"
+
+	// StepDockerBuild performs the container image build
+	StepDockerBuild StepName = "DockerBuild"
+)
+
+// BuildPhase represents the status of a build at a point in time.
+type BuildPhase string
+
+// Valid values for BuildPhase.
+const (
+	// BuildPhaseNew is automatically assigned to a newly created build.
+	BuildPhaseNew BuildPhase = "New"
+
+	// BuildPhasePending indicates that a pod name has been assigned and a build is
+	// about to start running.
+	BuildPhasePending BuildPhase = "Pending"
+
+	// BuildPhaseRunning indicates that a pod has been created and a build is running.
+	BuildPhaseRunning BuildPhase = "Running"
+
+	// BuildPhaseComplete indicates that a build has been successful.
+	BuildPhaseComplete BuildPhase = "Complete"
+
+	// BuildPhaseFailed indicates that a build has executed and failed.
+	BuildPhaseFailed BuildPhase = "Failed"
+
+	// BuildPhaseError indicates that an error prevented the build from executing.
+	BuildPhaseError BuildPhase = "Error"
+
+	// BuildPhaseCancelled indicates that a running/pending build was stopped from executing.
+	BuildPhaseCancelled BuildPhase = "Cancelled"
+)
+
+type BuildConditionType string
+
+// BuildCondition describes the state of a build at a certain point.
+type BuildCondition struct {
+	// Type of build condition.
+	Type BuildConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildConditionType"`
+	// Status of the condition, one of True, False, Unknown.
+	Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"`
+	// The last time this condition was updated.
+	LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"`
+	// The last time the condition transitioned from one status to another.
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+	// The reason for the condition's last transition.
+ Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// StatusReason is a brief CamelCase string that describes a temporary or +// permanent build error condition, meant for machine parsing and tidy display +// in the CLI. +type StatusReason string + +// BuildStatusOutput contains the status of the built image. +type BuildStatusOutput struct { + // to describes the status of the built image being pushed to a registry. + To *BuildStatusOutputTo `json:"to,omitempty" protobuf:"bytes,1,opt,name=to"` +} + +// BuildStatusOutputTo describes the status of the built image with regards to +// image registry to which it was supposed to be pushed. +type BuildStatusOutputTo struct { + // imageDigest is the digest of the built container image. The digest uniquely + // identifies the image in the registry to which it was pushed. + // + // Please note that this field may not always be set even if the push + // completes successfully - e.g. when the registry returns no digest or + // returns it in a format that the builder doesn't understand. + ImageDigest string `json:"imageDigest,omitempty" protobuf:"bytes,1,opt,name=imageDigest"` +} + +// BuildSourceType is the type of SCM used. +type BuildSourceType string + +// Valid values for BuildSourceType. +const ( + //BuildSourceGit instructs a build to use a Git source control repository as the build input. + BuildSourceGit BuildSourceType = "Git" + // BuildSourceDockerfile uses a Dockerfile as the start of a build + BuildSourceDockerfile BuildSourceType = "Dockerfile" + // BuildSourceBinary indicates the build will accept a Binary file as input. + BuildSourceBinary BuildSourceType = "Binary" + // BuildSourceImage indicates the build will accept an image as input + BuildSourceImage BuildSourceType = "Image" + // BuildSourceNone indicates the build has no predefined input (only valid for Source and Custom Strategies) + BuildSourceNone BuildSourceType = "None" +) + +// BuildSource is the SCM used for the build. +type BuildSource struct { + // type of build input to accept + // +k8s:conversion-gen=false + // +optional + Type BuildSourceType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=BuildSourceType"` + + // binary builds accept a binary as their input. The binary is generally assumed to be a tar, + // gzipped tar, or zip file depending on the strategy. For container image builds, this is the build + // context and an optional Dockerfile may be specified to override any Dockerfile in the + // build context. For Source builds, this is assumed to be an archive as described above. For + // Source and container image builds, if binary.asFile is set the build will receive a directory with + // a single file. contextDir may be used when an archive is provided. Custom builds will + // receive this binary as input on STDIN. + Binary *BinaryBuildSource `json:"binary,omitempty" protobuf:"bytes,2,opt,name=binary"` + + // dockerfile is the raw contents of a Dockerfile which should be built. When this option is + // specified, the FROM may be modified based on your strategy base image and additional ENV + // stanzas from your strategy environment will be added after the FROM, but before the rest + // of your Dockerfile stanzas. 
The Dockerfile source type may be used with other options like + // git - in those cases the Git repo will have any innate Dockerfile replaced in the context + // dir. + Dockerfile *string `json:"dockerfile,omitempty" protobuf:"bytes,3,opt,name=dockerfile"` + + // git contains optional information about git build source + Git *GitBuildSource `json:"git,omitempty" protobuf:"bytes,4,opt,name=git"` + + // images describes a set of images to be used to provide source for the build + Images []ImageSource `json:"images,omitempty" protobuf:"bytes,5,rep,name=images"` + + // contextDir specifies the sub-directory where the source code for the application exists. + // This allows to have buildable sources in directory other than root of + // repository. + ContextDir string `json:"contextDir,omitempty" protobuf:"bytes,6,opt,name=contextDir"` + + // sourceSecret is the name of a Secret that would be used for setting + // up the authentication for cloning private repository. + // The secret contains valid credentials for remote repository, where the + // data's key represent the authentication method to be used and value is + // the base64 encoded credentials. Supported auth methods are: ssh-privatekey. + SourceSecret *corev1.LocalObjectReference `json:"sourceSecret,omitempty" protobuf:"bytes,7,opt,name=sourceSecret"` + + // secrets represents a list of secrets and their destinations that will + // be used only for the build. + Secrets []SecretBuildSource `json:"secrets,omitempty" protobuf:"bytes,8,rep,name=secrets"` + + // configMaps represents a list of configMaps and their destinations that will + // be used for the build. + ConfigMaps []ConfigMapBuildSource `json:"configMaps,omitempty" protobuf:"bytes,9,rep,name=configMaps"` +} + +// ImageSource is used to describe build source that will be extracted from an image or used during a +// multi stage build. A reference of type ImageStreamTag, ImageStreamImage or DockerImage may be used. +// A pull secret can be specified to pull the image from an external registry or override the default +// service account secret if pulling from the internal registry. Image sources can either be used to +// extract content from an image and place it into the build context along with the repository source, +// or used directly during a multi-stage container image build to allow content to be copied without overwriting +// the contents of the repository source (see the 'paths' and 'as' fields). +type ImageSource struct { + // from is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to + // copy source from. + From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"` + + // A list of image names that this source will be used in place of during a multi-stage container image + // build. For instance, a Dockerfile that uses "COPY --from=nginx:latest" will first check for an image + // source that has "nginx:latest" in this field before attempting to pull directly. If the Dockerfile + // does not reference an image source it is ignored. This field and paths may both be set, in which case + // the contents will be used twice. + // +optional + As []string `json:"as,omitempty" protobuf:"bytes,4,rep,name=as"` + + // paths is a list of source and destination paths to copy from the image. This content will be copied + // into the build context prior to starting the build. If no paths are set, the build context will + // not be altered. 
+ // +optional + Paths []ImageSourcePath `json:"paths,omitempty" protobuf:"bytes,2,rep,name=paths"` + + // pullSecret is a reference to a secret to be used to pull the image from a registry + // If the image is pulled from the OpenShift registry, this field does not need to be set. + PullSecret *corev1.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,3,opt,name=pullSecret"` +} + +// ImageSourcePath describes a path to be copied from a source image and its destination within the build directory. +type ImageSourcePath struct { + // sourcePath is the absolute path of the file or directory inside the image to + // copy to the build directory. If the source path ends in /. then the content of + // the directory will be copied, but the directory itself will not be created at the + // destination. + SourcePath string `json:"sourcePath" protobuf:"bytes,1,opt,name=sourcePath"` + + // destinationDir is the relative directory within the build directory + // where files copied from the image are placed. + DestinationDir string `json:"destinationDir" protobuf:"bytes,2,opt,name=destinationDir"` +} + +// SecretBuildSource describes a secret and its destination directory that will be +// used only at the build time. The content of the secret referenced here will +// be copied into the destination directory instead of mounting. +type SecretBuildSource struct { + // secret is a reference to an existing secret that you want to use in your + // build. + Secret corev1.LocalObjectReference `json:"secret" protobuf:"bytes,1,opt,name=secret"` + + // destinationDir is the directory where the files from the secret should be + // available for the build time. + // For the Source build strategy, these will be injected into a container + // where the assemble script runs. Later, when the script finishes, all files + // injected will be truncated to zero length. + // For the container image build strategy, these will be copied into the build + // directory, where the Dockerfile is located, so users can ADD or COPY them + // during container image build. + DestinationDir string `json:"destinationDir,omitempty" protobuf:"bytes,2,opt,name=destinationDir"` +} + +// ConfigMapBuildSource describes a configmap and its destination directory that will be +// used only at the build time. The content of the configmap referenced here will +// be copied into the destination directory instead of mounting. +type ConfigMapBuildSource struct { + // configMap is a reference to an existing configmap that you want to use in your + // build. + ConfigMap corev1.LocalObjectReference `json:"configMap" protobuf:"bytes,1,opt,name=configMap"` + + // destinationDir is the directory where the files from the configmap should be + // available for the build time. + // For the Source build strategy, these will be injected into a container + // where the assemble script runs. + // For the container image build strategy, these will be copied into the build + // directory, where the Dockerfile is located, so users can ADD or COPY them + // during container image build. + DestinationDir string `json:"destinationDir,omitempty" protobuf:"bytes,2,opt,name=destinationDir"` +} + +// BinaryBuildSource describes a binary file to be used for the Docker and Source build strategies, +// where the file will be extracted and used as the build source. +type BinaryBuildSource struct { + // asFile indicates that the provided binary input should be considered a single file + // within the build input. 
For example, specifying "webapp.war" would place the provided + // binary as `/webapp.war` for the builder. If left empty, the Docker and Source build + // strategies assume this file is a zip, tar, or tar.gz file and extract it as the source. + // The custom strategy receives this binary as standard input. This filename may not + // contain slashes or be '..' or '.'. + AsFile string `json:"asFile,omitempty" protobuf:"bytes,1,opt,name=asFile"` +} + +// SourceRevision is the revision or commit information from the source for the build +type SourceRevision struct { + // type of the build source, may be one of 'Source', 'Dockerfile', 'Binary', or 'Images' + // +k8s:conversion-gen=false + Type BuildSourceType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildSourceType"` + + // Git contains information about git-based build source + Git *GitSourceRevision `json:"git,omitempty" protobuf:"bytes,2,opt,name=git"` +} + +// GitSourceRevision is the commit information from a git source for a build +type GitSourceRevision struct { + // commit is the commit hash identifying a specific commit + Commit string `json:"commit,omitempty" protobuf:"bytes,1,opt,name=commit"` + + // author is the author of a specific commit + Author SourceControlUser `json:"author,omitempty" protobuf:"bytes,2,opt,name=author"` + + // committer is the committer of a specific commit + Committer SourceControlUser `json:"committer,omitempty" protobuf:"bytes,3,opt,name=committer"` + + // message is the description of a specific commit + Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` +} + +// ProxyConfig defines what proxies to use for an operation +type ProxyConfig struct { + // httpProxy is a proxy used to reach the git repository over http + HTTPProxy *string `json:"httpProxy,omitempty" protobuf:"bytes,3,opt,name=httpProxy"` + + // httpsProxy is a proxy used to reach the git repository over https + HTTPSProxy *string `json:"httpsProxy,omitempty" protobuf:"bytes,4,opt,name=httpsProxy"` + + // noProxy is the list of domains for which the proxy should not be used + NoProxy *string `json:"noProxy,omitempty" protobuf:"bytes,5,opt,name=noProxy"` +} + +// GitBuildSource defines the parameters of a Git SCM +type GitBuildSource struct { + // uri points to the source that will be built. The structure of the source + // will depend on the type of build to run + URI string `json:"uri" protobuf:"bytes,1,opt,name=uri"` + + // ref is the branch/tag/ref to build. + Ref string `json:"ref,omitempty" protobuf:"bytes,2,opt,name=ref"` + + // proxyConfig defines the proxies to use for the git clone operation. Values + // not set here are inherited from cluster-wide build git proxy settings. + ProxyConfig `json:",inline" protobuf:"bytes,3,opt,name=proxyConfig"` +} + +// SourceControlUser defines the identity of a user of source control +type SourceControlUser struct { + // name of the source control user + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + + // email of the source control user + Email string `json:"email,omitempty" protobuf:"bytes,2,opt,name=email"` +} + +// BuildStrategy contains the details of how to perform a build. +type BuildStrategy struct { + // type is the kind of build strategy. + // +k8s:conversion-gen=false + // +optional + Type BuildStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=BuildStrategyType"` + + // dockerStrategy holds the parameters to the container image build strategy. 
+	DockerStrategy *DockerBuildStrategy `json:"dockerStrategy,omitempty" protobuf:"bytes,2,opt,name=dockerStrategy"`
+
+	// sourceStrategy holds the parameters to the Source build strategy.
+	SourceStrategy *SourceBuildStrategy `json:"sourceStrategy,omitempty" protobuf:"bytes,3,opt,name=sourceStrategy"`
+
+	// customStrategy holds the parameters to the Custom build strategy
+	CustomStrategy *CustomBuildStrategy `json:"customStrategy,omitempty" protobuf:"bytes,4,opt,name=customStrategy"`
+
+	// JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy.
+	// Deprecated: use OpenShift Pipelines
+	JenkinsPipelineStrategy *JenkinsPipelineBuildStrategy `json:"jenkinsPipelineStrategy,omitempty" protobuf:"bytes,5,opt,name=jenkinsPipelineStrategy"`
+}
+
+// BuildStrategyType describes a particular way of performing a build.
+type BuildStrategyType string
+
+// Valid values for BuildStrategyType.
+const (
+	// DockerBuildStrategyType performs builds using a Dockerfile.
+	DockerBuildStrategyType BuildStrategyType = "Docker"
+
+	// SourceBuildStrategyType performs builds using Source-to-Image with a Git repository
+	// and a builder image.
+	SourceBuildStrategyType BuildStrategyType = "Source"
+
+	// CustomBuildStrategyType performs builds using a custom builder container image.
+	CustomBuildStrategyType BuildStrategyType = "Custom"
+
+	// JenkinsPipelineBuildStrategyType indicates the build will run via Jenkins Pipeline.
+	JenkinsPipelineBuildStrategyType BuildStrategyType = "JenkinsPipeline"
+)
+
+// CustomBuildStrategy defines input parameters specific to Custom build.
+type CustomBuildStrategy struct {
+	// from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which
+	// the container image should be pulled
+	From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
+
+	// pullSecret is the name of a Secret that would be used for setting up
+	// the authentication for pulling the container images from the private Docker
+	// registries
+	PullSecret *corev1.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,2,opt,name=pullSecret"`
+
+	// env contains additional environment variables you want to pass into a builder container.
+	Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"`
+
+	// exposeDockerSocket will allow running Docker commands (and build container images) from
+	// inside the container.
+	// TODO: Allow admins to enforce 'false' for this option
+	ExposeDockerSocket bool `json:"exposeDockerSocket,omitempty" protobuf:"varint,4,opt,name=exposeDockerSocket"`
+
+	// forcePull describes if the controller should configure the build pod to always pull the images
+	// for the builder or only pull if it is not present locally
+	ForcePull bool `json:"forcePull,omitempty" protobuf:"varint,5,opt,name=forcePull"`
+
+	// secrets is a list of additional secrets that will be included in the build pod
+	Secrets []SecretSpec `json:"secrets,omitempty" protobuf:"bytes,6,rep,name=secrets"`
+
+	// buildAPIVersion is the requested API version for the Build object serialized and passed to the custom builder
+	BuildAPIVersion string `json:"buildAPIVersion,omitempty" protobuf:"bytes,7,opt,name=buildAPIVersion"`
+}
+
+// ImageOptimizationPolicy describes what optimizations the builder can perform when building images.
+type ImageOptimizationPolicy string
+
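Because DockerBuildStrategy's imageOptimizationPolicy field (below) is a pointer to ImageOptimizationPolicy, a policy constant has to be copied to an addressable value before assignment. A sketch with an illustrative helper name:

package example

import buildv1 "github.com/openshift/api/build/v1"

// withSkipLayers opts a strategy into the experimental SkipLayers policy.
func withSkipLayers(strategy *buildv1.DockerBuildStrategy) {
	policy := buildv1.ImageOptimizationSkipLayers
	strategy.ImageOptimizationPolicy = &policy
}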
+const (
+	// ImageOptimizationNone will generate a canonical container image as produced by the
+	// `container image build` command.
+	ImageOptimizationNone ImageOptimizationPolicy = "None"
+
+	// ImageOptimizationSkipLayers is an experimental policy and will avoid creating
+	// unique layers for each dockerfile line, resulting in smaller images and saving time
+	// during creation. Some Dockerfile syntax is not fully supported - content added to
+	// a VOLUME by an earlier layer may have incorrect uid, gid, and filesystem permissions.
+	// If an unsupported setting is detected, the build will fail.
+	ImageOptimizationSkipLayers ImageOptimizationPolicy = "SkipLayers"
+
+	// ImageOptimizationSkipLayersAndWarn is the same as SkipLayers, but will only
+	// warn to the build output instead of failing when unsupported syntax is detected. This
+	// policy is experimental.
+	ImageOptimizationSkipLayersAndWarn ImageOptimizationPolicy = "SkipLayersAndWarn"
+)
+
+// DockerBuildStrategy defines input parameters specific to container image build.
+type DockerBuildStrategy struct {
+	// from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage which overrides
+	// the FROM image in the Dockerfile for the build. If the Dockerfile uses multi-stage builds,
+	// this will replace the image in the last FROM directive of the file.
+	From *corev1.ObjectReference `json:"from,omitempty" protobuf:"bytes,1,opt,name=from"`
+
+	// pullSecret is the name of a Secret that would be used for setting up
+	// the authentication for pulling the container images from the private Docker
+	// registries
+	PullSecret *corev1.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,2,opt,name=pullSecret"`
+
+	// noCache if set to true indicates that the container image build must be executed with the
+	// --no-cache=true flag
+	NoCache bool `json:"noCache,omitempty" protobuf:"varint,3,opt,name=noCache"`
+
+	// env contains additional environment variables you want to pass into a builder container.
+	Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,4,rep,name=env"`
+
+	// forcePull describes if the builder should pull the images from registry prior to building.
+	ForcePull bool `json:"forcePull,omitempty" protobuf:"varint,5,opt,name=forcePull"`
+
+	// dockerfilePath is the path of the Dockerfile that will be used to build the container image,
+	// relative to the root of the context (contextDir).
+	// Defaults to `Dockerfile` if unset.
+	DockerfilePath string `json:"dockerfilePath,omitempty" protobuf:"bytes,6,opt,name=dockerfilePath"`
+
+	// buildArgs contains build arguments that will be resolved in the Dockerfile. See
+	// https://docs.docker.com/engine/reference/builder/#/arg for more details.
+	// NOTE: Only the 'name' and 'value' fields are supported. Any settings on the 'valueFrom' field
+	// are ignored.
+	BuildArgs []corev1.EnvVar `json:"buildArgs,omitempty" protobuf:"bytes,7,rep,name=buildArgs"`
+
+	// imageOptimizationPolicy describes what optimizations the system can use when building images
+	// to reduce the final size or time spent building the image. The default policy is 'None' which
+	// means the final build image will be equivalent to an image created by the container image build API.
+	// The experimental policy 'SkipLayers' will avoid committing new layers in between each
+	// image step, and will fail if the Dockerfile cannot provide compatibility with the 'None'
+	// policy. An additional experimental policy 'SkipLayersAndWarn' is the same as
+	// 'SkipLayers' but simply warns if compatibility cannot be preserved.
+	ImageOptimizationPolicy *ImageOptimizationPolicy `json:"imageOptimizationPolicy,omitempty" protobuf:"bytes,8,opt,name=imageOptimizationPolicy,casttype=ImageOptimizationPolicy"`
+
+	// volumes is a list of input volumes that can be mounted into the build's runtime environment.
+	// Only a subset of Kubernetes Volume sources are supported by builds.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes
+	// +listType=map
+	// +listMapKey=name
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	Volumes []BuildVolume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,9,opt,name=volumes"`
+}
+
+// SourceBuildStrategy defines input parameters specific to a Source build.
+type SourceBuildStrategy struct {
+	// from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which
+	// the container image should be pulled
+	From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"`
+
+	// pullSecret is the name of a Secret that would be used for setting up
+	// the authentication for pulling the container images from the private Docker
+	// registries
+	PullSecret *corev1.LocalObjectReference `json:"pullSecret,omitempty" protobuf:"bytes,2,opt,name=pullSecret"`
+
+	// env contains additional environment variables you want to pass into a builder container.
+	Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"`
+
+	// scripts is the location of Source scripts
+	Scripts string `json:"scripts,omitempty" protobuf:"bytes,4,opt,name=scripts"`
+
+	// incremental flag forces the Source build to do incremental builds if true.
+	Incremental *bool `json:"incremental,omitempty" protobuf:"varint,5,opt,name=incremental"`
+
+	// forcePull describes if the builder should pull the images from registry prior to building.
+	ForcePull bool `json:"forcePull,omitempty" protobuf:"varint,6,opt,name=forcePull"`
+
+	// deprecated json field, do not reuse: runtimeImage
+	// +k8s:protobuf-deprecated=runtimeImage,7
+
+	// deprecated json field, do not reuse: runtimeArtifacts
+	// +k8s:protobuf-deprecated=runtimeArtifacts,8
+
+	// volumes is a list of input volumes that can be mounted into the build's runtime environment.
+	// Only a subset of Kubernetes Volume sources are supported by builds.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes
+	// +listType=map
+	// +listMapKey=name
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	Volumes []BuildVolume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,9,opt,name=volumes"`
+}
+
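A sketch of enabling incremental Source builds with the type above; the Incremental field is a *bool, and the builder image reference is made up:

package example

import (
	buildv1 "github.com/openshift/api/build/v1"
	corev1 "k8s.io/api/core/v1"
)

// incrementalSourceStrategy reuses artifacts from previous builds when the
// builder image supports it.
func incrementalSourceStrategy() *buildv1.SourceBuildStrategy {
	incremental := true
	return &buildv1.SourceBuildStrategy{
		From: corev1.ObjectReference{
			Kind: "ImageStreamTag",
			Name: "builder:latest", // hypothetical builder image
		},
		Incremental: &incremental,
	}
}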
+
+// JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build.
+// Deprecated: use OpenShift Pipelines
+type JenkinsPipelineBuildStrategy struct {
+	// JenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline
+	// relative to the root of the context (contextDir). If neither JenkinsfilePath nor Jenkinsfile is
+	// specified, this defaults to Jenkinsfile in the root of the specified contextDir.
+	JenkinsfilePath string `json:"jenkinsfilePath,omitempty" protobuf:"bytes,1,opt,name=jenkinsfilePath"`
+
+	// Jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.
+	Jenkinsfile string `json:"jenkinsfile,omitempty" protobuf:"bytes,2,opt,name=jenkinsfile"`
+
+	// env contains additional environment variables you want to pass into a build pipeline.
+	Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"`
+}
+
+// A BuildPostCommitSpec holds a build post commit hook specification. The hook
+// executes a command in a temporary container running the build output image,
+// immediately after the last layer of the image is committed and before the
+// image is pushed to a registry. The command is executed with the current
+// working directory ($PWD) set to the image's WORKDIR.
+//
+// The build will be marked as failed if the hook execution fails. It will fail
+// if the script or command returns a non-zero exit code, or if there is any
+// other error related to starting the temporary container.
+//
+// There are five different ways to configure the hook. As an example, all forms
+// below are equivalent and will execute `rake test --verbose`.
+//
+// 1. Shell script:
+//
+//	"postCommit": {
+//	  "script": "rake test --verbose"
+//	}
+//
+//	The above is a convenient form which is equivalent to:
+//
+//	"postCommit": {
+//	  "command": ["/bin/sh", "-ic"],
+//	  "args": ["rake test --verbose"]
+//	}
+//
+// 2. A command as the image entrypoint:
+//
+//	"postCommit": {
+//	  "command": ["rake", "test", "--verbose"]
+//	}
+//
+//	Command overrides the image entrypoint in the exec form, as documented in
+//	Docker: https://docs.docker.com/engine/reference/builder/#entrypoint.
+//
+// 3. Pass arguments to the default entrypoint:
+//
+//	"postCommit": {
+//	  "args": ["rake", "test", "--verbose"]
+//	}
+//
+//	This form is only useful if the image entrypoint can handle arguments.
+//
+// 4. Shell script with arguments:
+//
+//	"postCommit": {
+//	  "script": "rake test $1",
+//	  "args": ["--verbose"]
+//	}
+//
+//	This form is useful if you need to pass arguments that would otherwise be
+//	hard to quote properly in the shell script. In the script, $0 will be
+//	"/bin/sh" and $1, $2, etc, are the positional arguments from Args.
+//
+// 5. Command with arguments:
+//
+//	"postCommit": {
+//	  "command": ["rake", "test"],
+//	  "args": ["--verbose"]
+//	}
+//
+//	This form is equivalent to appending the arguments to the Command slice.
+//
+// It is invalid to provide both Script and Command simultaneously. If none of
+// the fields are specified, the hook is not executed.
+type BuildPostCommitSpec struct {
+	// command is the command to run. It may not be specified with Script.
+	// This might be needed if the image doesn't have `/bin/sh`, or if you
+	// do not want to use a shell. In all other cases, using Script might be
+	// more convenient.
+	Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"`
+	// args is a list of arguments that are provided to either Command,
+	// Script or the container image's default entrypoint. The arguments are
+	// placed immediately after the command to be run.
+	Args []string `json:"args,omitempty" protobuf:"bytes,2,rep,name=args"`
+	// script is a shell script to be run with `/bin/sh -ic`. It may not be
+	// specified with Command. Use Script when a shell script is appropriate
+	// to execute the post build hook, for example for running unit tests
+	// with `rake test`. If you need control over the image entrypoint, or
+	// if the image does not have `/bin/sh`, use Command and/or Args.
+	// The `-i` flag is needed to support CentOS and RHEL images that use
+	// Software Collections (SCL), in order to have the appropriate
+	// collections enabled in the shell. E.g., in the Ruby image, this is
+	// necessary to make `ruby`, `bundle` and other binaries available in
+	// the PATH.
+	Script string `json:"script,omitempty" protobuf:"bytes,3,opt,name=script"`
+}
+
+// BuildOutput is input to a build strategy and describes the container image that the strategy
+// should produce.
+type BuildOutput struct {
+	// to defines an optional location to push the output of this build to.
+	// Kind must be one of 'ImageStreamTag' or 'DockerImage'.
+	// This value will be used to look up a container image repository to push to.
+	// In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of
+	// the build unless Namespace is specified.
+	To *corev1.ObjectReference `json:"to,omitempty" protobuf:"bytes,1,opt,name=to"`
+
+	// PushSecret is the name of a Secret that would be used for setting
+	// up the authentication for executing the Docker push to authentication
+	// enabled Docker Registry (or Docker Hub).
+	PushSecret *corev1.LocalObjectReference `json:"pushSecret,omitempty" protobuf:"bytes,2,opt,name=pushSecret"`
+
+	// imageLabels define a list of labels that are applied to the resulting image. If there
+	// are multiple labels with the same name then the last one in the list is used.
+	ImageLabels []ImageLabel `json:"imageLabels,omitempty" protobuf:"bytes,3,rep,name=imageLabels"`
+}
+
+// ImageLabel represents a label applied to the resulting image.
+type ImageLabel struct {
+	// name defines the name of the label. It must have non-zero length.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+
+	// value defines the literal value of the label.
+	Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
+}
+
+// +genclient
+// +genclient:method=Instantiate,verb=create,subresource=instantiate,input=BuildRequest,result=Build
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Build configurations define a build process for new container images. There are three types of builds possible - a container image build using a Dockerfile, a Source-to-Image build that uses a specially prepared base image that accepts source code that it can make runnable, and a custom build that can run
+// arbitrary container images as a base and accept the build parameters. Builds run on the cluster and on completion are pushed to the container image registry specified in the "output" section. A build can be triggered via a webhook, when the base image changes, or when a user manually requests a new build be
+// created.
+//
+// Each build created by a build configuration is numbered and refers back to its parent configuration. Multiple builds can be triggered at once. Builds that do not have "output" set can be used to test code or run a verification build.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BuildConfig struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// spec holds all the input necessary to produce a new build, and the conditions when
+	// to trigger them.
+	Spec BuildConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+	// status holds any relevant information about a build config
+	// +optional
+	Status BuildConfigStatus `json:"status" protobuf:"bytes,3,opt,name=status"`
+}
+
+// BuildConfigSpec describes when and how builds are created
+type BuildConfigSpec struct {
+
+	// triggers determine how new Builds can be launched from a BuildConfig. If
+	// no triggers are defined, a new build can only occur as a result of an
+	// explicit client build creation.
+	// +optional
+	Triggers []BuildTriggerPolicy `json:"triggers,omitempty" protobuf:"bytes,1,rep,name=triggers"`
+
+	// RunPolicy describes how the new build created from this build
+	// configuration will be scheduled for execution.
+	// This is optional; if not specified, it defaults to "Serial".
+	RunPolicy BuildRunPolicy `json:"runPolicy,omitempty" protobuf:"bytes,2,opt,name=runPolicy,casttype=BuildRunPolicy"`
+
+	// CommonSpec is the desired build specification
+	CommonSpec `json:",inline" protobuf:"bytes,3,opt,name=commonSpec"`
+
+	// successfulBuildsHistoryLimit is the number of old successful builds to retain.
+	// When a BuildConfig is created, the 5 most recent successful builds are retained unless this value is set.
+	// If removed after the BuildConfig has been created, all successful builds are retained.
+	SuccessfulBuildsHistoryLimit *int32 `json:"successfulBuildsHistoryLimit,omitempty" protobuf:"varint,4,opt,name=successfulBuildsHistoryLimit"`
+
+	// failedBuildsHistoryLimit is the number of old failed builds to retain.
+	// When a BuildConfig is created, the 5 most recent failed builds are retained unless this value is set.
+	// If removed after the BuildConfig has been created, all failed builds are retained.
+	FailedBuildsHistoryLimit *int32 `json:"failedBuildsHistoryLimit,omitempty" protobuf:"varint,5,opt,name=failedBuildsHistoryLimit"`
+}
+
+// BuildRunPolicy defines the behaviour of how the new builds are executed
+// from the existing build configuration.
+type BuildRunPolicy string
+
+const (
+	// BuildRunPolicyParallel schedules new builds immediately after they are
+	// created. Builds will be executed in parallel.
+	BuildRunPolicyParallel BuildRunPolicy = "Parallel"
+
+	// BuildRunPolicySerial schedules new builds to execute in a sequence as
+	// they are created. Every build gets queued up and will execute when the
+	// previous build completes. This is the default policy.
+	BuildRunPolicySerial BuildRunPolicy = "Serial"
+
+	// BuildRunPolicySerialLatestOnly schedules only the latest build to execute,
+	// cancelling all the previously queued builds.
+	BuildRunPolicySerialLatestOnly BuildRunPolicy = "SerialLatestOnly"
+)
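To make the run-policy and retention fields concrete, a hedged sketch (again not part of the patch; the limit value is arbitrary):

```go
package example

import buildv1 "github.com/openshift/api/build/v1"

// serialSpec builds one Build at a time and keeps only the three most recent
// successful and failed Builds for this BuildConfig.
func serialSpec() buildv1.BuildConfigSpec {
	limit := int32(3)
	return buildv1.BuildConfigSpec{
		Triggers:                     []buildv1.BuildTriggerPolicy{{Type: buildv1.ConfigChangeBuildTriggerType}},
		RunPolicy:                    buildv1.BuildRunPolicySerial, // the default when unset
		SuccessfulBuildsHistoryLimit: &limit,
		FailedBuildsHistoryLimit:     &limit,
	}
}
```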
+
+// BuildConfigStatus contains current state of the build config object.
+type BuildConfigStatus struct {
+	// lastVersion is used to inform about the number of the last triggered build.
+	LastVersion int64 `json:"lastVersion" protobuf:"varint,1,opt,name=lastVersion"`
+
+	// ImageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec,
+	// including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. There is a single entry
+	// in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger.
+	ImageChangeTriggers []ImageChangeTriggerStatus `json:"imageChangeTriggers,omitempty" protobuf:"bytes,2,rep,name=imageChangeTriggers"`
+}
+
+// SecretLocalReference contains information that points to the local secret being used
+type SecretLocalReference struct {
+	// Name is the name of the resource in the same namespace being referenced
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+}
+
+// WebHookTrigger is a trigger that gets invoked using a webhook type of post
+type WebHookTrigger struct {
+	// secret used to validate requests.
+	// Deprecated: use SecretReference instead.
+	Secret string `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"`
+
+	// allowEnv determines whether the webhook can set environment variables; can only
+	// be set to true for GenericWebHook.
+	AllowEnv bool `json:"allowEnv,omitempty" protobuf:"varint,2,opt,name=allowEnv"`
+
+	// secretReference is a reference to a secret in the same namespace,
+	// containing the value to be validated when the webhook is invoked.
+	// The secret being referenced must contain a key named "WebHookSecretKey", the value
+	// of which will be checked against the value supplied in the webhook invocation.
+	SecretReference *SecretLocalReference `json:"secretReference,omitempty" protobuf:"bytes,3,opt,name=secretReference"`
+}
+
+// ImageChangeTrigger allows builds to be triggered when an ImageStream changes
+type ImageChangeTrigger struct {
+	// lastTriggeredImageID is used internally by the ImageChangeController to save the last
+	// used image ID for a build.
+	// This field is deprecated and will be removed in a future release.
+	// Deprecated
+	LastTriggeredImageID string `json:"lastTriggeredImageID,omitempty" protobuf:"bytes,1,opt,name=lastTriggeredImageID"`
+
+	// from is a reference to an ImageStreamTag that will trigger a build when updated.
+	// It is optional. If no From is specified, the From image from the build strategy
+	// will be used. Only one ImageChangeTrigger with an empty From reference is allowed in
+	// a build configuration.
+	From *corev1.ObjectReference `json:"from,omitempty" protobuf:"bytes,2,opt,name=from"`
+
+	// paused is true if this trigger is temporarily disabled. Optional.
+	Paused bool `json:"paused,omitempty" protobuf:"varint,3,opt,name=paused"`
+}
+
+// ImageStreamTagReference references the ImageStreamTag in an image change trigger by namespace and name.
+type ImageStreamTagReference struct {
+	// namespace is the namespace where the ImageStreamTag for an ImageChangeTrigger is located
+	Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"`
+
+	// name is the name of the ImageStreamTag for an ImageChangeTrigger
+	Name string `json:"name,omitempty" protobuf:"bytes,2,opt,name=name"`
+}
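For illustration (not in the patch), a sketch of an ImageChangeTrigger that fires on updates to a specific ImageStreamTag; the tag name is hypothetical:

```go
package example

import (
	buildv1 "github.com/openshift/api/build/v1"
	corev1 "k8s.io/api/core/v1"
)

// imageTrigger rebuilds whenever "builder:latest" is updated. Leaving From
// nil instead would track the image referenced by the build strategy.
var imageTrigger = buildv1.ImageChangeTrigger{
	From: &corev1.ObjectReference{Kind: "ImageStreamTag", Name: "builder:latest"},
}
```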
+
+// ImageChangeTriggerStatus tracks the latest resolved status of the associated ImageChangeTrigger policy
+// specified in the BuildConfigSpec.Triggers struct.
+type ImageChangeTriggerStatus struct {
+	// lastTriggeredImageID represents the sha/id of the ImageStreamTag when a Build for this BuildConfig was started.
+	// The lastTriggeredImageID is updated each time a Build for this BuildConfig is started, even if this ImageStreamTag is not the reason the Build is started.
+	LastTriggeredImageID string `json:"lastTriggeredImageID,omitempty" protobuf:"bytes,1,opt,name=lastTriggeredImageID"`
+
+	// from is the ImageStreamTag that is the source of the trigger.
+	From ImageStreamTagReference `json:"from,omitempty" protobuf:"bytes,2,opt,name=from"`
+
+	// lastTriggerTime is the last time this particular ImageStreamTag triggered a Build to start.
+	// This field is only updated when this trigger specifically started a Build.
+	LastTriggerTime metav1.Time `json:"lastTriggerTime,omitempty" protobuf:"bytes,3,opt,name=lastTriggerTime"`
+}
+
+// BuildTriggerPolicy describes a policy for a single trigger that results in a new Build.
+type BuildTriggerPolicy struct {
+	// type is the type of build trigger. Valid values:
+	//
+	// - GitHub
+	// GitHubWebHookBuildTriggerType represents a trigger that launches builds on
+	// GitHub webhook invocations
+	//
+	// - Generic
+	// GenericWebHookBuildTriggerType represents a trigger that launches builds on
+	// generic webhook invocations
+	//
+	// - GitLab
+	// GitLabWebHookBuildTriggerType represents a trigger that launches builds on
+	// GitLab webhook invocations
+	//
+	// - Bitbucket
+	// BitbucketWebHookBuildTriggerType represents a trigger that launches builds on
+	// Bitbucket webhook invocations
+	//
+	// - ImageChange
+	// ImageChangeBuildTriggerType represents a trigger that launches builds on
+	// availability of a new version of an image
+	//
+	// - ConfigChange
+	// ConfigChangeBuildTriggerType will trigger a build on an initial build config creation
+	// WARNING: In the future the behavior will change to trigger a build on any config change
+	Type BuildTriggerType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildTriggerType"`
+
+	// github contains the parameters for a GitHub webhook type of trigger
+	GitHubWebHook *WebHookTrigger `json:"github,omitempty" protobuf:"bytes,2,opt,name=github"`
+
+	// generic contains the parameters for a Generic webhook type of trigger
+	GenericWebHook *WebHookTrigger `json:"generic,omitempty" protobuf:"bytes,3,opt,name=generic"`
+
+	// imageChange contains parameters for an ImageChange type of trigger
+	ImageChange *ImageChangeTrigger `json:"imageChange,omitempty" protobuf:"bytes,4,opt,name=imageChange"`
+
+	// GitLabWebHook contains the parameters for a GitLab webhook type of trigger
+	GitLabWebHook *WebHookTrigger `json:"gitlab,omitempty" protobuf:"bytes,5,opt,name=gitlab"`
+
+	// BitbucketWebHook contains the parameters for a Bitbucket webhook type of
+	// trigger
+	BitbucketWebHook *WebHookTrigger `json:"bitbucket,omitempty" protobuf:"bytes,6,opt,name=bitbucket"`
+}
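A hedged sketch (not part of the patch) of a trigger policy wiring up a GitHub webhook validated by a namespace-local secret; the secret name is hypothetical:

```go
package example

import buildv1 "github.com/openshift/api/build/v1"

// githubTrigger launches a build on each GitHub webhook invocation, checking
// the caller-supplied secret against the "WebHookSecretKey" entry of the
// referenced Secret.
var githubTrigger = buildv1.BuildTriggerPolicy{
	Type: buildv1.GitHubWebHookBuildTriggerType,
	GitHubWebHook: &buildv1.WebHookTrigger{
		SecretReference: &buildv1.SecretLocalReference{Name: "github-webhook-secret"},
	},
}
```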
+
+// BuildTriggerType refers to a specific BuildTriggerPolicy implementation.
+type BuildTriggerType string
+
+const (
+	// GitHubWebHookBuildTriggerType represents a trigger that launches builds on
+	// GitHub webhook invocations
+	GitHubWebHookBuildTriggerType           BuildTriggerType = "GitHub"
+	GitHubWebHookBuildTriggerTypeDeprecated BuildTriggerType = "github"
+
+	// GenericWebHookBuildTriggerType represents a trigger that launches builds on
+	// generic webhook invocations
+	GenericWebHookBuildTriggerType           BuildTriggerType = "Generic"
+	GenericWebHookBuildTriggerTypeDeprecated BuildTriggerType = "generic"
+
+	// GitLabWebHookBuildTriggerType represents a trigger that launches builds on
+	// GitLab webhook invocations
+	GitLabWebHookBuildTriggerType BuildTriggerType = "GitLab"
+
+	// BitbucketWebHookBuildTriggerType represents a trigger that launches builds on
+	// Bitbucket webhook invocations
+	BitbucketWebHookBuildTriggerType BuildTriggerType = "Bitbucket"
+
+	// ImageChangeBuildTriggerType represents a trigger that launches builds on
+	// availability of a new version of an image
+	ImageChangeBuildTriggerType           BuildTriggerType = "ImageChange"
+	ImageChangeBuildTriggerTypeDeprecated BuildTriggerType = "imageChange"
+
+	// ConfigChangeBuildTriggerType will trigger a build on an initial build config creation
+	// WARNING: In the future the behavior will change to trigger a build on any config change
+	ConfigChangeBuildTriggerType BuildTriggerType = "ConfigChange"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildList is a collection of Builds.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BuildList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// items is a list of builds
+	Items []Build `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildConfigList is a collection of BuildConfigs.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BuildConfigList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// items is a list of build configs
+	Items []BuildConfig `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// GenericWebHookEvent is the payload expected for a generic webhook post
+type GenericWebHookEvent struct {
+	// type is the type of source repository
+	// +k8s:conversion-gen=false
+	Type BuildSourceType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=BuildSourceType"`
+
+	// git is the git information if the Type is BuildSourceGit
+	Git *GitInfo `json:"git,omitempty" protobuf:"bytes,2,opt,name=git"`
+
+	// env contains additional environment variables you want to pass into a builder container.
+	// ValueFrom is not supported.
+	Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,3,rep,name=env"`
+
+	// DockerStrategyOptions contains additional docker-strategy specific options for the build
+	DockerStrategyOptions *DockerStrategyOptions `json:"dockerStrategyOptions,omitempty" protobuf:"bytes,4,opt,name=dockerStrategyOptions"`
+}
+
+// GitInfo is the aggregated git information for a generic webhook post
+type GitInfo struct {
+	GitBuildSource    `json:",inline" protobuf:"bytes,1,opt,name=gitBuildSource"`
+	GitSourceRevision `json:",inline" protobuf:"bytes,2,opt,name=gitSourceRevision"`
+
+	// Refs is a list of GitRefs for the provided repo - generally sent
+	// when used from a post-receive hook. This field is optional and is
+	// used when sending multiple refs.
+	Refs []GitRefInfo `json:"refs" protobuf:"bytes,3,rep,name=refs"`
+}
+
+// GitRefInfo is a single ref
+type GitRefInfo struct {
+	GitBuildSource    `json:",inline" protobuf:"bytes,1,opt,name=gitBuildSource"`
+	GitSourceRevision `json:",inline" protobuf:"bytes,2,opt,name=gitSourceRevision"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildLog is the (unused) resource associated with the build log redirector
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BuildLog struct {
+	metav1.TypeMeta `json:",inline"`
+}
+
+// DockerStrategyOptions contains extra strategy options for container image builds
+type DockerStrategyOptions struct {
+	// Args contains any build arguments that are to be passed to Docker. See
+	// https://docs.docker.com/engine/reference/builder/#/arg for more details
+	BuildArgs []corev1.EnvVar `json:"buildArgs,omitempty" protobuf:"bytes,1,rep,name=buildArgs"`
+
+	// noCache overrides the docker-strategy noCache option in the build config
+	NoCache *bool `json:"noCache,omitempty" protobuf:"varint,2,opt,name=noCache"`
+}
+
+// SourceStrategyOptions contains extra strategy options for Source builds
+type SourceStrategyOptions struct {
+	// incremental overrides the source-strategy incremental option in the build config
+	Incremental *bool `json:"incremental,omitempty" protobuf:"varint,1,opt,name=incremental"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildRequest is the resource used to pass parameters to the build generator
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BuildRequest struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// revision is the information from the source for a specific repo snapshot.
+	Revision *SourceRevision `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"`
+
+	// triggeredByImage is the Image that triggered this build.
+	TriggeredByImage *corev1.ObjectReference `json:"triggeredByImage,omitempty" protobuf:"bytes,3,opt,name=triggeredByImage"`
+
+	// from is the reference to the ImageStreamTag that triggered the build.
+	From *corev1.ObjectReference `json:"from,omitempty" protobuf:"bytes,4,opt,name=from"`
+
+	// binary indicates a request to build from a binary provided to the builder
+	Binary *BinaryBuildSource `json:"binary,omitempty" protobuf:"bytes,5,opt,name=binary"`
+
+	// lastVersion (optional) is the LastVersion of the BuildConfig that was used
+	// to generate the build. If the BuildConfig in the generator doesn't match, a build will
+	// not be generated.
+	LastVersion *int64 `json:"lastVersion,omitempty" protobuf:"varint,6,opt,name=lastVersion"`
+
+	// env contains additional environment variables you want to pass into a builder container.
+	Env []corev1.EnvVar `json:"env,omitempty" protobuf:"bytes,7,rep,name=env"`
+
+	// triggeredBy describes which triggers started the most recent update to the
+	// build configuration and contains information about those triggers.
+	TriggeredBy []BuildTriggerCause `json:"triggeredBy,omitempty" protobuf:"bytes,8,rep,name=triggeredBy"`
+
+	// DockerStrategyOptions contains additional docker-strategy specific options for the build
+	DockerStrategyOptions *DockerStrategyOptions `json:"dockerStrategyOptions,omitempty" protobuf:"bytes,9,opt,name=dockerStrategyOptions"`
+
+	// SourceStrategyOptions contains additional source-strategy specific options for the build
+	SourceStrategyOptions *SourceStrategyOptions `json:"sourceStrategyOptions,omitempty" protobuf:"bytes,10,opt,name=sourceStrategyOptions"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BinaryBuildRequestOptions are the options required to fully specify a binary build request
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BinaryBuildRequestOptions struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// asFile determines if the binary should be created as a file within the source rather than extracted as an archive
+	AsFile string `json:"asFile,omitempty" protobuf:"bytes,2,opt,name=asFile"`
+
+	// TODO: Improve map[string][]string conversion so we can handle nested objects
+
+	// revision.commit is the value identifying a specific commit
+	Commit string `json:"revision.commit,omitempty" protobuf:"bytes,3,opt,name=revisionCommit"`
+
+	// revision.message is the description of a specific commit
+	Message string `json:"revision.message,omitempty" protobuf:"bytes,4,opt,name=revisionMessage"`
+
+	// revision.authorName of the source control user
+	AuthorName string `json:"revision.authorName,omitempty" protobuf:"bytes,5,opt,name=revisionAuthorName"`
+
+	// revision.authorEmail of the source control user
+	AuthorEmail string `json:"revision.authorEmail,omitempty" protobuf:"bytes,6,opt,name=revisionAuthorEmail"`
+
+	// revision.committerName of the source control user
+	CommitterName string `json:"revision.committerName,omitempty" protobuf:"bytes,7,opt,name=revisionCommitterName"`
+
+	// revision.committerEmail of the source control user
+	CommitterEmail string `json:"revision.committerEmail,omitempty" protobuf:"bytes,8,opt,name=revisionCommitterEmail"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildLogOptions is the REST options for a build log
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type BuildLogOptions struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// container for which to stream logs. Defaults to only container if there is one container in the pod.
+	Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"`
+	// follow if true indicates that the build log should be streamed until
+	// the build terminates.
+	Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"`
+	// previous returns previous build logs. Defaults to false.
+	Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"`
+	// sinceSeconds is a relative time in seconds before the current time from which to show logs. If this value
+	// precedes the time a pod was started, only logs since the pod start will be returned.
+	// If this value is in the future, no logs will be returned.
+	// Only one of sinceSeconds or sinceTime may be specified.
+	SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"`
+	// sinceTime is an RFC3339 timestamp from which to show logs. If this value
+	// precedes the time a pod was started, only logs since the pod start will be returned.
+	// If this value is in the future, no logs will be returned.
+	// Only one of sinceSeconds or sinceTime may be specified.
+	SinceTime *metav1.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"`
+	// timestamps, If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line
+	// of log output. Defaults to false.
+	Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"`
+	// tailLines, If set, is the number of lines from the end of the logs to show.
If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"` + // limitBytes, If set, is the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. + LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"` + + // noWait if true causes the call to return immediately even if the build + // is not available yet. Otherwise the server will wait until the build has started. + // TODO: Fix the tag to 'noWait' in v2 + NoWait bool `json:"nowait,omitempty" protobuf:"varint,9,opt,name=nowait"` + + // version of the build for which to view logs. + Version *int64 `json:"version,omitempty" protobuf:"varint,10,opt,name=version"` + + // insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the + // serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver + // and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real + // kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the + // connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept + // the actual log data coming from the real kubelet). + // +optional + InsecureSkipTLSVerifyBackend bool `json:"insecureSkipTLSVerifyBackend,omitempty" protobuf:"varint,11,opt,name=insecureSkipTLSVerifyBackend"` +} + +// SecretSpec specifies a secret to be included in a build pod and its corresponding mount point +type SecretSpec struct { + // secretSource is a reference to the secret + SecretSource corev1.LocalObjectReference `json:"secretSource" protobuf:"bytes,1,opt,name=secretSource"` + + // mountPath is the path at which to mount the secret + MountPath string `json:"mountPath" protobuf:"bytes,2,opt,name=mountPath"` +} + +// BuildVolume describes a volume that is made available to build pods, +// such that it can be mounted into buildah's runtime environment. +// Only a subset of Kubernetes Volume sources are supported. +type BuildVolume struct { + // name is a unique identifier for this BuildVolume. + // It must conform to the Kubernetes DNS label standard and be unique within the pod. + // Names that collide with those added by the build controller will result in a + // failed build with an error message detailing which name caused the error. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + // +required + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // source represents the location and type of the mounted volume. 
+	// +required
+	Source BuildVolumeSource `json:"source" protobuf:"bytes,2,opt,name=source"`
+
+	// mounts represents the location of the volume in the image build container
+	// +required
+	// +listType=map
+	// +listMapKey=destinationPath
+	// +patchMergeKey=destinationPath
+	// +patchStrategy=merge
+	Mounts []BuildVolumeMount `json:"mounts" patchStrategy:"merge" patchMergeKey:"destinationPath" protobuf:"bytes,3,opt,name=mounts"`
+}
+
+// BuildVolumeSourceType represents a build volume source type
+type BuildVolumeSourceType string
+
+const (
+	// BuildVolumeSourceTypeSecret is the Secret build source volume type
+	BuildVolumeSourceTypeSecret BuildVolumeSourceType = "Secret"
+
+	// BuildVolumeSourceTypeConfigMap is the ConfigMap build source volume type
+	BuildVolumeSourceTypeConfigMap BuildVolumeSourceType = "ConfigMap"
+
+	// BuildVolumeSourceTypeCSI is the CSI build source volume type
+	BuildVolumeSourceTypeCSI BuildVolumeSourceType = "CSI"
+)
+
+// BuildVolumeSource represents the source of a volume to mount
+// Only one of its supported types may be specified at any given time.
+type BuildVolumeSource struct {
+
+	// type is the BuildVolumeSourceType for the volume source.
+	// Type must match the populated volume source.
+	// Valid types are: Secret, ConfigMap, CSI
+	Type BuildVolumeSourceType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=BuildVolumeSourceType"`
+
+	// secret represents a Secret that should populate this volume.
+	// More info: https://kubernetes.io/docs/concepts/storage/volumes#secret
+	// +optional
+	Secret *corev1.SecretVolumeSource `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"`
+
+	// configMap represents a ConfigMap that should populate this volume
+	// +optional
+	ConfigMap *corev1.ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,3,opt,name=configMap"`
+
+	// csi represents ephemeral storage provided by external CSI drivers which support this capability
+	// +optional
+	CSI *corev1.CSIVolumeSource `json:"csi,omitempty" protobuf:"bytes,4,opt,name=csi"`
+}
+
+// BuildVolumeMount describes the mounting of a Volume within buildah's runtime environment.
+type BuildVolumeMount struct {
+	// destinationPath is the path within the buildah runtime environment at which the volume should be mounted.
+	// The transient mount within the build image and the backing volume will both be mounted read only.
+	// Must be an absolute path, must not contain '..' or ':', and must not collide with a destination path generated
+	// by the builder process.
+	// Paths that collide with those added by the build controller will result in a
+	// failed build with an error message detailing which path caused the error.
+	DestinationPath string `json:"destinationPath" protobuf:"bytes,1,opt,name=destinationPath"`
+}
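Before the generated deepcopy file, a hedged sketch (not part of either vendored file) showing how the volume types above compose; the secret name and mount path are hypothetical:

```go
package example

import (
	buildv1 "github.com/openshift/api/build/v1"
	corev1 "k8s.io/api/core/v1"
)

// npmCredentials mounts the Secret "npm-credentials" read-only into the
// buildah runtime environment at /run/secrets/npm.
var npmCredentials = buildv1.BuildVolume{
	Name: "npm-credentials",
	Source: buildv1.BuildVolumeSource{
		Type:   buildv1.BuildVolumeSourceTypeSecret,
		Secret: &corev1.SecretVolumeSource{SecretName: "npm-credentials"},
	},
	Mounts: []buildv1.BuildVolumeMount{{DestinationPath: "/run/secrets/npm"}},
}
```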
diff --git a/vendor/github.com/openshift/api/build/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/build/v1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..d36b28c82
--- /dev/null
+++ b/vendor/github.com/openshift/api/build/v1/zz_generated.deepcopy.go
@@ -0,0 +1,1610 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BinaryBuildRequestOptions) DeepCopyInto(out *BinaryBuildRequestOptions) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BinaryBuildRequestOptions.
+func (in *BinaryBuildRequestOptions) DeepCopy() *BinaryBuildRequestOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(BinaryBuildRequestOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BinaryBuildRequestOptions) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BinaryBuildSource) DeepCopyInto(out *BinaryBuildSource) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BinaryBuildSource.
+func (in *BinaryBuildSource) DeepCopy() *BinaryBuildSource {
+	if in == nil {
+		return nil
+	}
+	out := new(BinaryBuildSource)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BitbucketWebHookCause) DeepCopyInto(out *BitbucketWebHookCause) {
+	*out = *in
+	in.CommonWebHookCause.DeepCopyInto(&out.CommonWebHookCause)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BitbucketWebHookCause.
+func (in *BitbucketWebHookCause) DeepCopy() *BitbucketWebHookCause {
+	if in == nil {
+		return nil
+	}
+	out := new(BitbucketWebHookCause)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Build) DeepCopyInto(out *Build) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Build.
+func (in *Build) DeepCopy() *Build {
+	if in == nil {
+		return nil
+	}
+	out := new(Build)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Build) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildCondition) DeepCopyInto(out *BuildCondition) {
+	*out = *in
+	in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildCondition.
+func (in *BuildCondition) DeepCopy() *BuildCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(BuildCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BuildConfig) DeepCopyInto(out *BuildConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildConfig. +func (in *BuildConfig) DeepCopy() *BuildConfig { + if in == nil { + return nil + } + out := new(BuildConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildConfigList) DeepCopyInto(out *BuildConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BuildConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildConfigList. +func (in *BuildConfigList) DeepCopy() *BuildConfigList { + if in == nil { + return nil + } + out := new(BuildConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildConfigSpec) DeepCopyInto(out *BuildConfigSpec) { + *out = *in + if in.Triggers != nil { + in, out := &in.Triggers, &out.Triggers + *out = make([]BuildTriggerPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.CommonSpec.DeepCopyInto(&out.CommonSpec) + if in.SuccessfulBuildsHistoryLimit != nil { + in, out := &in.SuccessfulBuildsHistoryLimit, &out.SuccessfulBuildsHistoryLimit + *out = new(int32) + **out = **in + } + if in.FailedBuildsHistoryLimit != nil { + in, out := &in.FailedBuildsHistoryLimit, &out.FailedBuildsHistoryLimit + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildConfigSpec. +func (in *BuildConfigSpec) DeepCopy() *BuildConfigSpec { + if in == nil { + return nil + } + out := new(BuildConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildConfigStatus) DeepCopyInto(out *BuildConfigStatus) { + *out = *in + if in.ImageChangeTriggers != nil { + in, out := &in.ImageChangeTriggers, &out.ImageChangeTriggers + *out = make([]ImageChangeTriggerStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildConfigStatus. +func (in *BuildConfigStatus) DeepCopy() *BuildConfigStatus { + if in == nil { + return nil + } + out := new(BuildConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BuildList) DeepCopyInto(out *BuildList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Build, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildList. +func (in *BuildList) DeepCopy() *BuildList { + if in == nil { + return nil + } + out := new(BuildList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildLog) DeepCopyInto(out *BuildLog) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildLog. +func (in *BuildLog) DeepCopy() *BuildLog { + if in == nil { + return nil + } + out := new(BuildLog) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildLog) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildLogOptions) DeepCopyInto(out *BuildLogOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.SinceSeconds != nil { + in, out := &in.SinceSeconds, &out.SinceSeconds + *out = new(int64) + **out = **in + } + if in.SinceTime != nil { + in, out := &in.SinceTime, &out.SinceTime + *out = (*in).DeepCopy() + } + if in.TailLines != nil { + in, out := &in.TailLines, &out.TailLines + *out = new(int64) + **out = **in + } + if in.LimitBytes != nil { + in, out := &in.LimitBytes, &out.LimitBytes + *out = new(int64) + **out = **in + } + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildLogOptions. +func (in *BuildLogOptions) DeepCopy() *BuildLogOptions { + if in == nil { + return nil + } + out := new(BuildLogOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildLogOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildOutput) DeepCopyInto(out *BuildOutput) { + *out = *in + if in.To != nil { + in, out := &in.To, &out.To + *out = new(corev1.ObjectReference) + **out = **in + } + if in.PushSecret != nil { + in, out := &in.PushSecret, &out.PushSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.ImageLabels != nil { + in, out := &in.ImageLabels, &out.ImageLabels + *out = make([]ImageLabel, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildOutput. 
+func (in *BuildOutput) DeepCopy() *BuildOutput { + if in == nil { + return nil + } + out := new(BuildOutput) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildPostCommitSpec) DeepCopyInto(out *BuildPostCommitSpec) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildPostCommitSpec. +func (in *BuildPostCommitSpec) DeepCopy() *BuildPostCommitSpec { + if in == nil { + return nil + } + out := new(BuildPostCommitSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildRequest) DeepCopyInto(out *BuildRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(SourceRevision) + (*in).DeepCopyInto(*out) + } + if in.TriggeredByImage != nil { + in, out := &in.TriggeredByImage, &out.TriggeredByImage + *out = new(corev1.ObjectReference) + **out = **in + } + if in.From != nil { + in, out := &in.From, &out.From + *out = new(corev1.ObjectReference) + **out = **in + } + if in.Binary != nil { + in, out := &in.Binary, &out.Binary + *out = new(BinaryBuildSource) + **out = **in + } + if in.LastVersion != nil { + in, out := &in.LastVersion, &out.LastVersion + *out = new(int64) + **out = **in + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TriggeredBy != nil { + in, out := &in.TriggeredBy, &out.TriggeredBy + *out = make([]BuildTriggerCause, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DockerStrategyOptions != nil { + in, out := &in.DockerStrategyOptions, &out.DockerStrategyOptions + *out = new(DockerStrategyOptions) + (*in).DeepCopyInto(*out) + } + if in.SourceStrategyOptions != nil { + in, out := &in.SourceStrategyOptions, &out.SourceStrategyOptions + *out = new(SourceStrategyOptions) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildRequest. +func (in *BuildRequest) DeepCopy() *BuildRequest { + if in == nil { + return nil + } + out := new(BuildRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BuildSource) DeepCopyInto(out *BuildSource) { + *out = *in + if in.Binary != nil { + in, out := &in.Binary, &out.Binary + *out = new(BinaryBuildSource) + **out = **in + } + if in.Dockerfile != nil { + in, out := &in.Dockerfile, &out.Dockerfile + *out = new(string) + **out = **in + } + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(GitBuildSource) + (*in).DeepCopyInto(*out) + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ImageSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourceSecret != nil { + in, out := &in.SourceSecret, &out.SourceSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretBuildSource, len(*in)) + copy(*out, *in) + } + if in.ConfigMaps != nil { + in, out := &in.ConfigMaps, &out.ConfigMaps + *out = make([]ConfigMapBuildSource, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildSource. +func (in *BuildSource) DeepCopy() *BuildSource { + if in == nil { + return nil + } + out := new(BuildSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildSpec) DeepCopyInto(out *BuildSpec) { + *out = *in + in.CommonSpec.DeepCopyInto(&out.CommonSpec) + if in.TriggeredBy != nil { + in, out := &in.TriggeredBy, &out.TriggeredBy + *out = make([]BuildTriggerCause, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildSpec. +func (in *BuildSpec) DeepCopy() *BuildSpec { + if in == nil { + return nil + } + out := new(BuildSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildStatus) DeepCopyInto(out *BuildStatus) { + *out = *in + if in.StartTimestamp != nil { + in, out := &in.StartTimestamp, &out.StartTimestamp + *out = (*in).DeepCopy() + } + if in.CompletionTimestamp != nil { + in, out := &in.CompletionTimestamp, &out.CompletionTimestamp + *out = (*in).DeepCopy() + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(corev1.ObjectReference) + **out = **in + } + in.Output.DeepCopyInto(&out.Output) + if in.Stages != nil { + in, out := &in.Stages, &out.Stages + *out = make([]StageInfo, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]BuildCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildStatus. +func (in *BuildStatus) DeepCopy() *BuildStatus { + if in == nil { + return nil + } + out := new(BuildStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildStatusOutput) DeepCopyInto(out *BuildStatusOutput) { + *out = *in + if in.To != nil { + in, out := &in.To, &out.To + *out = new(BuildStatusOutputTo) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildStatusOutput. 
+func (in *BuildStatusOutput) DeepCopy() *BuildStatusOutput { + if in == nil { + return nil + } + out := new(BuildStatusOutput) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildStatusOutputTo) DeepCopyInto(out *BuildStatusOutputTo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildStatusOutputTo. +func (in *BuildStatusOutputTo) DeepCopy() *BuildStatusOutputTo { + if in == nil { + return nil + } + out := new(BuildStatusOutputTo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildStrategy) DeepCopyInto(out *BuildStrategy) { + *out = *in + if in.DockerStrategy != nil { + in, out := &in.DockerStrategy, &out.DockerStrategy + *out = new(DockerBuildStrategy) + (*in).DeepCopyInto(*out) + } + if in.SourceStrategy != nil { + in, out := &in.SourceStrategy, &out.SourceStrategy + *out = new(SourceBuildStrategy) + (*in).DeepCopyInto(*out) + } + if in.CustomStrategy != nil { + in, out := &in.CustomStrategy, &out.CustomStrategy + *out = new(CustomBuildStrategy) + (*in).DeepCopyInto(*out) + } + if in.JenkinsPipelineStrategy != nil { + in, out := &in.JenkinsPipelineStrategy, &out.JenkinsPipelineStrategy + *out = new(JenkinsPipelineBuildStrategy) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildStrategy. +func (in *BuildStrategy) DeepCopy() *BuildStrategy { + if in == nil { + return nil + } + out := new(BuildStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildTriggerCause) DeepCopyInto(out *BuildTriggerCause) { + *out = *in + if in.GenericWebHook != nil { + in, out := &in.GenericWebHook, &out.GenericWebHook + *out = new(GenericWebHookCause) + (*in).DeepCopyInto(*out) + } + if in.GitHubWebHook != nil { + in, out := &in.GitHubWebHook, &out.GitHubWebHook + *out = new(GitHubWebHookCause) + (*in).DeepCopyInto(*out) + } + if in.ImageChangeBuild != nil { + in, out := &in.ImageChangeBuild, &out.ImageChangeBuild + *out = new(ImageChangeCause) + (*in).DeepCopyInto(*out) + } + if in.GitLabWebHook != nil { + in, out := &in.GitLabWebHook, &out.GitLabWebHook + *out = new(GitLabWebHookCause) + (*in).DeepCopyInto(*out) + } + if in.BitbucketWebHook != nil { + in, out := &in.BitbucketWebHook, &out.BitbucketWebHook + *out = new(BitbucketWebHookCause) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildTriggerCause. +func (in *BuildTriggerCause) DeepCopy() *BuildTriggerCause { + if in == nil { + return nil + } + out := new(BuildTriggerCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BuildTriggerPolicy) DeepCopyInto(out *BuildTriggerPolicy) { + *out = *in + if in.GitHubWebHook != nil { + in, out := &in.GitHubWebHook, &out.GitHubWebHook + *out = new(WebHookTrigger) + (*in).DeepCopyInto(*out) + } + if in.GenericWebHook != nil { + in, out := &in.GenericWebHook, &out.GenericWebHook + *out = new(WebHookTrigger) + (*in).DeepCopyInto(*out) + } + if in.ImageChange != nil { + in, out := &in.ImageChange, &out.ImageChange + *out = new(ImageChangeTrigger) + (*in).DeepCopyInto(*out) + } + if in.GitLabWebHook != nil { + in, out := &in.GitLabWebHook, &out.GitLabWebHook + *out = new(WebHookTrigger) + (*in).DeepCopyInto(*out) + } + if in.BitbucketWebHook != nil { + in, out := &in.BitbucketWebHook, &out.BitbucketWebHook + *out = new(WebHookTrigger) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildTriggerPolicy. +func (in *BuildTriggerPolicy) DeepCopy() *BuildTriggerPolicy { + if in == nil { + return nil + } + out := new(BuildTriggerPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildVolume) DeepCopyInto(out *BuildVolume) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + if in.Mounts != nil { + in, out := &in.Mounts, &out.Mounts + *out = make([]BuildVolumeMount, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildVolume. +func (in *BuildVolume) DeepCopy() *BuildVolume { + if in == nil { + return nil + } + out := new(BuildVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildVolumeMount) DeepCopyInto(out *BuildVolumeMount) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildVolumeMount. +func (in *BuildVolumeMount) DeepCopy() *BuildVolumeMount { + if in == nil { + return nil + } + out := new(BuildVolumeMount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildVolumeSource) DeepCopyInto(out *BuildVolumeSource) { + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(corev1.SecretVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(corev1.ConfigMapVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.CSI != nil { + in, out := &in.CSI, &out.CSI + *out = new(corev1.CSIVolumeSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildVolumeSource. +func (in *BuildVolumeSource) DeepCopy() *BuildVolumeSource { + if in == nil { + return nil + } + out := new(BuildVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CommonSpec) DeepCopyInto(out *CommonSpec) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(SourceRevision) + (*in).DeepCopyInto(*out) + } + in.Strategy.DeepCopyInto(&out.Strategy) + in.Output.DeepCopyInto(&out.Output) + in.Resources.DeepCopyInto(&out.Resources) + in.PostCommit.DeepCopyInto(&out.PostCommit) + if in.CompletionDeadlineSeconds != nil { + in, out := &in.CompletionDeadlineSeconds, &out.CompletionDeadlineSeconds + *out = new(int64) + **out = **in + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(OptionalNodeSelector, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.MountTrustedCA != nil { + in, out := &in.MountTrustedCA, &out.MountTrustedCA + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonSpec. +func (in *CommonSpec) DeepCopy() *CommonSpec { + if in == nil { + return nil + } + out := new(CommonSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommonWebHookCause) DeepCopyInto(out *CommonWebHookCause) { + *out = *in + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(SourceRevision) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonWebHookCause. +func (in *CommonWebHookCause) DeepCopy() *CommonWebHookCause { + if in == nil { + return nil + } + out := new(CommonWebHookCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapBuildSource) DeepCopyInto(out *ConfigMapBuildSource) { + *out = *in + out.ConfigMap = in.ConfigMap + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapBuildSource. +func (in *ConfigMapBuildSource) DeepCopy() *ConfigMapBuildSource { + if in == nil { + return nil + } + out := new(ConfigMapBuildSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomBuildStrategy) DeepCopyInto(out *CustomBuildStrategy) { + *out = *in + out.From = in.From + if in.PullSecret != nil { + in, out := &in.PullSecret, &out.PullSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretSpec, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomBuildStrategy. +func (in *CustomBuildStrategy) DeepCopy() *CustomBuildStrategy { + if in == nil { + return nil + } + out := new(CustomBuildStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DockerBuildStrategy) DeepCopyInto(out *DockerBuildStrategy) { + *out = *in + if in.From != nil { + in, out := &in.From, &out.From + *out = new(corev1.ObjectReference) + **out = **in + } + if in.PullSecret != nil { + in, out := &in.PullSecret, &out.PullSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BuildArgs != nil { + in, out := &in.BuildArgs, &out.BuildArgs + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ImageOptimizationPolicy != nil { + in, out := &in.ImageOptimizationPolicy, &out.ImageOptimizationPolicy + *out = new(ImageOptimizationPolicy) + **out = **in + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]BuildVolume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerBuildStrategy. +func (in *DockerBuildStrategy) DeepCopy() *DockerBuildStrategy { + if in == nil { + return nil + } + out := new(DockerBuildStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerStrategyOptions) DeepCopyInto(out *DockerStrategyOptions) { + *out = *in + if in.BuildArgs != nil { + in, out := &in.BuildArgs, &out.BuildArgs + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NoCache != nil { + in, out := &in.NoCache, &out.NoCache + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerStrategyOptions. +func (in *DockerStrategyOptions) DeepCopy() *DockerStrategyOptions { + if in == nil { + return nil + } + out := new(DockerStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenericWebHookCause) DeepCopyInto(out *GenericWebHookCause) { + *out = *in + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(SourceRevision) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericWebHookCause. +func (in *GenericWebHookCause) DeepCopy() *GenericWebHookCause { + if in == nil { + return nil + } + out := new(GenericWebHookCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenericWebHookEvent) DeepCopyInto(out *GenericWebHookEvent) { + *out = *in + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(GitInfo) + (*in).DeepCopyInto(*out) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DockerStrategyOptions != nil { + in, out := &in.DockerStrategyOptions, &out.DockerStrategyOptions + *out = new(DockerStrategyOptions) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericWebHookEvent. 
+func (in *GenericWebHookEvent) DeepCopy() *GenericWebHookEvent { + if in == nil { + return nil + } + out := new(GenericWebHookEvent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitBuildSource) DeepCopyInto(out *GitBuildSource) { + *out = *in + in.ProxyConfig.DeepCopyInto(&out.ProxyConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitBuildSource. +func (in *GitBuildSource) DeepCopy() *GitBuildSource { + if in == nil { + return nil + } + out := new(GitBuildSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitHubWebHookCause) DeepCopyInto(out *GitHubWebHookCause) { + *out = *in + if in.Revision != nil { + in, out := &in.Revision, &out.Revision + *out = new(SourceRevision) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubWebHookCause. +func (in *GitHubWebHookCause) DeepCopy() *GitHubWebHookCause { + if in == nil { + return nil + } + out := new(GitHubWebHookCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitInfo) DeepCopyInto(out *GitInfo) { + *out = *in + in.GitBuildSource.DeepCopyInto(&out.GitBuildSource) + out.GitSourceRevision = in.GitSourceRevision + if in.Refs != nil { + in, out := &in.Refs, &out.Refs + *out = make([]GitRefInfo, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitInfo. +func (in *GitInfo) DeepCopy() *GitInfo { + if in == nil { + return nil + } + out := new(GitInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitLabWebHookCause) DeepCopyInto(out *GitLabWebHookCause) { + *out = *in + in.CommonWebHookCause.DeepCopyInto(&out.CommonWebHookCause) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLabWebHookCause. +func (in *GitLabWebHookCause) DeepCopy() *GitLabWebHookCause { + if in == nil { + return nil + } + out := new(GitLabWebHookCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRefInfo) DeepCopyInto(out *GitRefInfo) { + *out = *in + in.GitBuildSource.DeepCopyInto(&out.GitBuildSource) + out.GitSourceRevision = in.GitSourceRevision + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRefInfo. +func (in *GitRefInfo) DeepCopy() *GitRefInfo { + if in == nil { + return nil + } + out := new(GitRefInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitSourceRevision) DeepCopyInto(out *GitSourceRevision) { + *out = *in + out.Author = in.Author + out.Committer = in.Committer + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitSourceRevision. 
+func (in *GitSourceRevision) DeepCopy() *GitSourceRevision { + if in == nil { + return nil + } + out := new(GitSourceRevision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageChangeCause) DeepCopyInto(out *ImageChangeCause) { + *out = *in + if in.FromRef != nil { + in, out := &in.FromRef, &out.FromRef + *out = new(corev1.ObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageChangeCause. +func (in *ImageChangeCause) DeepCopy() *ImageChangeCause { + if in == nil { + return nil + } + out := new(ImageChangeCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageChangeTrigger) DeepCopyInto(out *ImageChangeTrigger) { + *out = *in + if in.From != nil { + in, out := &in.From, &out.From + *out = new(corev1.ObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageChangeTrigger. +func (in *ImageChangeTrigger) DeepCopy() *ImageChangeTrigger { + if in == nil { + return nil + } + out := new(ImageChangeTrigger) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageChangeTriggerStatus) DeepCopyInto(out *ImageChangeTriggerStatus) { + *out = *in + out.From = in.From + in.LastTriggerTime.DeepCopyInto(&out.LastTriggerTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageChangeTriggerStatus. +func (in *ImageChangeTriggerStatus) DeepCopy() *ImageChangeTriggerStatus { + if in == nil { + return nil + } + out := new(ImageChangeTriggerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageLabel) DeepCopyInto(out *ImageLabel) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLabel. +func (in *ImageLabel) DeepCopy() *ImageLabel { + if in == nil { + return nil + } + out := new(ImageLabel) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageSource) DeepCopyInto(out *ImageSource) { + *out = *in + out.From = in.From + if in.As != nil { + in, out := &in.As, &out.As + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]ImageSourcePath, len(*in)) + copy(*out, *in) + } + if in.PullSecret != nil { + in, out := &in.PullSecret, &out.PullSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSource. +func (in *ImageSource) DeepCopy() *ImageSource { + if in == nil { + return nil + } + out := new(ImageSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageSourcePath) DeepCopyInto(out *ImageSourcePath) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSourcePath. +func (in *ImageSourcePath) DeepCopy() *ImageSourcePath { + if in == nil { + return nil + } + out := new(ImageSourcePath) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamTagReference) DeepCopyInto(out *ImageStreamTagReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamTagReference. +func (in *ImageStreamTagReference) DeepCopy() *ImageStreamTagReference { + if in == nil { + return nil + } + out := new(ImageStreamTagReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JenkinsPipelineBuildStrategy) DeepCopyInto(out *JenkinsPipelineBuildStrategy) { + *out = *in + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JenkinsPipelineBuildStrategy. +func (in *JenkinsPipelineBuildStrategy) DeepCopy() *JenkinsPipelineBuildStrategy { + if in == nil { + return nil + } + out := new(JenkinsPipelineBuildStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in OptionalNodeSelector) DeepCopyInto(out *OptionalNodeSelector) { + { + in := &in + *out = make(OptionalNodeSelector, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalNodeSelector. +func (in OptionalNodeSelector) DeepCopy() OptionalNodeSelector { + if in == nil { + return nil + } + out := new(OptionalNodeSelector) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) { + *out = *in + if in.HTTPProxy != nil { + in, out := &in.HTTPProxy, &out.HTTPProxy + *out = new(string) + **out = **in + } + if in.HTTPSProxy != nil { + in, out := &in.HTTPSProxy, &out.HTTPSProxy + *out = new(string) + **out = **in + } + if in.NoProxy != nil { + in, out := &in.NoProxy, &out.NoProxy + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig. +func (in *ProxyConfig) DeepCopy() *ProxyConfig { + if in == nil { + return nil + } + out := new(ProxyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretBuildSource) DeepCopyInto(out *SecretBuildSource) { + *out = *in + out.Secret = in.Secret + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretBuildSource. 
+func (in *SecretBuildSource) DeepCopy() *SecretBuildSource { + if in == nil { + return nil + } + out := new(SecretBuildSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretLocalReference) DeepCopyInto(out *SecretLocalReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretLocalReference. +func (in *SecretLocalReference) DeepCopy() *SecretLocalReference { + if in == nil { + return nil + } + out := new(SecretLocalReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretSpec) DeepCopyInto(out *SecretSpec) { + *out = *in + out.SecretSource = in.SecretSource + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretSpec. +func (in *SecretSpec) DeepCopy() *SecretSpec { + if in == nil { + return nil + } + out := new(SecretSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceBuildStrategy) DeepCopyInto(out *SourceBuildStrategy) { + *out = *in + out.From = in.From + if in.PullSecret != nil { + in, out := &in.PullSecret, &out.PullSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Incremental != nil { + in, out := &in.Incremental, &out.Incremental + *out = new(bool) + **out = **in + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]BuildVolume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceBuildStrategy. +func (in *SourceBuildStrategy) DeepCopy() *SourceBuildStrategy { + if in == nil { + return nil + } + out := new(SourceBuildStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceControlUser) DeepCopyInto(out *SourceControlUser) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceControlUser. +func (in *SourceControlUser) DeepCopy() *SourceControlUser { + if in == nil { + return nil + } + out := new(SourceControlUser) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceRevision) DeepCopyInto(out *SourceRevision) { + *out = *in + if in.Git != nil { + in, out := &in.Git, &out.Git + *out = new(GitSourceRevision) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceRevision. +func (in *SourceRevision) DeepCopy() *SourceRevision { + if in == nil { + return nil + } + out := new(SourceRevision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SourceStrategyOptions) DeepCopyInto(out *SourceStrategyOptions) { + *out = *in + if in.Incremental != nil { + in, out := &in.Incremental, &out.Incremental + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceStrategyOptions. +func (in *SourceStrategyOptions) DeepCopy() *SourceStrategyOptions { + if in == nil { + return nil + } + out := new(SourceStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StageInfo) DeepCopyInto(out *StageInfo) { + *out = *in + in.StartTime.DeepCopyInto(&out.StartTime) + if in.Steps != nil { + in, out := &in.Steps, &out.Steps + *out = make([]StepInfo, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StageInfo. +func (in *StageInfo) DeepCopy() *StageInfo { + if in == nil { + return nil + } + out := new(StageInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StepInfo) DeepCopyInto(out *StepInfo) { + *out = *in + in.StartTime.DeepCopyInto(&out.StartTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StepInfo. +func (in *StepInfo) DeepCopy() *StepInfo { + if in == nil { + return nil + } + out := new(StepInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebHookTrigger) DeepCopyInto(out *WebHookTrigger) { + *out = *in + if in.SecretReference != nil { + in, out := &in.SecretReference, &out.SecretReference + *out = new(SecretLocalReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebHookTrigger. +func (in *WebHookTrigger) DeepCopy() *WebHookTrigger { + if in == nil { + return nil + } + out := new(WebHookTrigger) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..72ff507b7 --- /dev/null +++ b/vendor/github.com/openshift/api/build/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,692 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_BinaryBuildRequestOptions = map[string]string{ + "": "BinaryBuildRequestOptions are the options required to fully specify a binary build request\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "asFile": "asFile determines if the binary should be created as a file within the source rather than extracted as an archive", + "revision.commit": "revision.commit is the value identifying a specific commit", + "revision.message": "revision.message is the description of a specific commit", + "revision.authorName": "revision.authorName of the source control user", + "revision.authorEmail": "revision.authorEmail of the source control user", + "revision.committerName": "revision.committerName of the source control user", + "revision.committerEmail": "revision.committerEmail of the source control user", +} + +func (BinaryBuildRequestOptions) SwaggerDoc() map[string]string { + return map_BinaryBuildRequestOptions +} + +var map_BinaryBuildSource = map[string]string{ + "": "BinaryBuildSource describes a binary file to be used for the Docker and Source build strategies, where the file will be extracted and used as the build source.", + "asFile": "asFile indicates that the provided binary input should be considered a single file within the build input. For example, specifying \"webapp.war\" would place the provided binary as `/webapp.war` for the builder. If left empty, the Docker and Source build strategies assume this file is a zip, tar, or tar.gz file and extract it as the source. The custom strategy receives this binary as standard input. This filename may not contain slashes or be '..' or '.'.", +} + +func (BinaryBuildSource) SwaggerDoc() map[string]string { + return map_BinaryBuildSource +} + +var map_BitbucketWebHookCause = map[string]string{ + "": "BitbucketWebHookCause has information about a Bitbucket webhook that triggered a build.", +} + +func (BitbucketWebHookCause) SwaggerDoc() map[string]string { + return map_BitbucketWebHookCause +} + +var map_Build = map[string]string{ + "": "Build encapsulates the inputs needed to produce a new deployable image, as well as the status of the execution and a reference to the Pod which executed the build.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is all the inputs used to execute the build.", + "status": "status is the current status of the build.", +} + +func (Build) SwaggerDoc() map[string]string { + return map_Build +} + +var map_BuildCondition = map[string]string{ + "": "BuildCondition describes the state of a build at a certain point.", + "type": "Type of build condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastUpdateTime": "The last time this condition was updated.", + "lastTransitionTime": "The last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (BuildCondition) SwaggerDoc() map[string]string { + return map_BuildCondition +} + +var map_BuildConfig = map[string]string{ + "": "Build configurations define a build process for new container images. 
There are three types of builds possible - a container image build using a Dockerfile, a Source-to-Image build that uses a specially prepared base image that accepts source code that it can make runnable, and a custom build that can run arbitrary container images as a base and accept the build parameters. Builds run on the cluster and on completion are pushed to the container image registry specified in the \"output\" section. A build can be triggered via a webhook, when the base image changes, or when a user manually requests a new build be created.\n\nEach build created by a build configuration is numbered and refers back to its parent configuration. Multiple builds can be triggered at once. Builds that do not have \"output\" set can be used to test code or run a verification build.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds all the input necessary to produce a new build, and the conditions when to trigger them.", + "status": "status holds any relevant information about a build config", +} + +func (BuildConfig) SwaggerDoc() map[string]string { + return map_BuildConfig +} + +var map_BuildConfigList = map[string]string{ + "": "BuildConfigList is a collection of BuildConfigs.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of build configs", +} + +func (BuildConfigList) SwaggerDoc() map[string]string { + return map_BuildConfigList +} + +var map_BuildConfigSpec = map[string]string{ + "": "BuildConfigSpec describes when and how builds are created", + "triggers": "triggers determine how new Builds can be launched from a BuildConfig. If no triggers are defined, a new build can only occur as a result of an explicit client build creation.", + "runPolicy": "RunPolicy describes how the new build created from this build configuration will be scheduled for execution. This is optional, if not specified we default to \"Serial\".", + "successfulBuildsHistoryLimit": "successfulBuildsHistoryLimit is the number of old successful builds to retain. When a BuildConfig is created, the 5 most recent successful builds are retained unless this value is set. If removed after the BuildConfig has been created, all successful builds are retained.", + "failedBuildsHistoryLimit": "failedBuildsHistoryLimit is the number of old failed builds to retain. When a BuildConfig is created, the 5 most recent failed builds are retained unless this value is set. If removed after the BuildConfig has been created, all failed builds are retained.", +} + +func (BuildConfigSpec) SwaggerDoc() map[string]string { + return map_BuildConfigSpec +} + +var map_BuildConfigStatus = map[string]string{ + "": "BuildConfigStatus contains current state of the build config object.", + "lastVersion": "lastVersion is used to inform about the number of the last triggered build.", + "imageChangeTriggers": "ImageChangeTriggers captures the runtime state of any ImageChangeTrigger specified in the BuildConfigSpec, including the value reconciled by the OpenShift APIServer for the lastTriggeredImageID. 
There is a single entry in this array for each image change trigger in spec. Each trigger status references the ImageStreamTag that acts as the source of the trigger.", +} + +func (BuildConfigStatus) SwaggerDoc() map[string]string { + return map_BuildConfigStatus +} + +var map_BuildList = map[string]string{ + "": "BuildList is a collection of Builds.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of builds", +} + +func (BuildList) SwaggerDoc() map[string]string { + return map_BuildList +} + +var map_BuildLog = map[string]string{ + "": "BuildLog is the (unused) resource associated with the build log redirector\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", +} + +func (BuildLog) SwaggerDoc() map[string]string { + return map_BuildLog +} + +var map_BuildLogOptions = map[string]string{ + "": "BuildLogOptions is the REST options for a build log\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "container": "container for which to stream logs. Defaults to only container if there is one container in the pod.", + "follow": "follow if true indicates that the build log should be streamed until the build terminates.", + "previous": "previous returns previous build logs. Defaults to false.", + "sinceSeconds": "sinceSeconds is a relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "sinceTime": "sinceTime is an RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "timestamps": "timestamps, If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", + "tailLines": "tailLines, If set, is the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", + "limitBytes": "limitBytes, If set, is the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", + "nowait": "noWait if true causes the call to return immediately even if the build is not available yet. Otherwise the server will wait until the build has started.", + "version": "version of the build for which to view logs.", + "insecureSkipTLSVerifyBackend": "insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real kubelet. 
If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept the actual log data coming from the real kubelet).", +} + +func (BuildLogOptions) SwaggerDoc() map[string]string { + return map_BuildLogOptions +} + +var map_BuildOutput = map[string]string{ + "": "BuildOutput is input to a build strategy and describes the container image that the strategy should produce.", + "to": "to defines an optional location to push the output of this build to. Kind must be one of 'ImageStreamTag' or 'DockerImage'. This value will be used to look up a container image repository to push to. In the case of an ImageStreamTag, the ImageStreamTag will be looked for in the namespace of the build unless Namespace is specified.", + "pushSecret": "PushSecret is the name of a Secret that would be used for setting up the authentication for executing the Docker push to authentication enabled Docker Registry (or Docker Hub).", + "imageLabels": "imageLabels define a list of labels that are applied to the resulting image. If there are multiple labels with the same name then the last one in the list is used.", +} + +func (BuildOutput) SwaggerDoc() map[string]string { + return map_BuildOutput +} + +var map_BuildPostCommitSpec = map[string]string{ + "": "A BuildPostCommitSpec holds a build post commit hook specification. The hook executes a command in a temporary container running the build output image, immediately after the last layer of the image is committed and before the image is pushed to a registry. The command is executed with the current working directory ($PWD) set to the image's WORKDIR.\n\nThe build will be marked as failed if the hook execution fails. It will fail if the script or command returns a non-zero exit code, or if there is any other error related to starting the temporary container.\n\nThere are five different ways to configure the hook. As an example, all forms below are equivalent and will execute `rake test --verbose`.\n\n1. Shell script:\n\n\t \"postCommit\": {\n\t \"script\": \"rake test --verbose\",\n\t }\n\n\tThe above is a convenient form which is equivalent to:\n\n\t \"postCommit\": {\n\t \"command\": [\"/bin/sh\", \"-ic\"],\n\t \"args\": [\"rake test --verbose\"]\n\t }\n\n2. A command as the image entrypoint:\n\n\t \"postCommit\": {\n\t \"command\": [\"rake\", \"test\", \"--verbose\"]\n\t }\n\n\tCommand overrides the image entrypoint in the exec form, as documented in\n\tDocker: https://docs.docker.com/engine/reference/builder/#entrypoint.\n\n3. Pass arguments to the default entrypoint:\n\n\t \"postCommit\": {\n\t\t\t \"args\": [\"rake\", \"test\", \"--verbose\"]\n\t\t }\n\n\t This form is only useful if the image entrypoint can handle arguments.\n\n4. Shell script with arguments:\n\n\t \"postCommit\": {\n\t \"script\": \"rake test $1\",\n\t \"args\": [\"--verbose\"]\n\t }\n\n\tThis form is useful if you need to pass arguments that would otherwise be\n\thard to quote properly in the shell script. In the script, $0 will be\n\t\"/bin/sh\" and $1, $2, etc, are the positional arguments from Args.\n\n5. Command with arguments:\n\n\t \"postCommit\": {\n\t \"command\": [\"rake\", \"test\"],\n\t \"args\": [\"--verbose\"]\n\t }\n\n\tThis form is equivalent to appending the arguments to the Command slice.\n\nIt is invalid to provide both Script and Command simultaneously. 
If none of the fields are specified, the hook is not executed.", + "command": "command is the command to run. It may not be specified with Script. This might be needed if the image doesn't have `/bin/sh`, or if you do not want to use a shell. In all other cases, using Script might be more convenient.", + "args": "args is a list of arguments that are provided to either Command, Script or the container image's default entrypoint. The arguments are placed immediately after the command to be run.", + "script": "script is a shell script to be run with `/bin/sh -ic`. It may not be specified with Command. Use Script when a shell script is appropriate to execute the post build hook, for example for running unit tests with `rake test`. If you need control over the image entrypoint, or if the image does not have `/bin/sh`, use Command and/or Args. The `-i` flag is needed to support CentOS and RHEL images that use Software Collections (SCL), in order to have the appropriate collections enabled in the shell. E.g., in the Ruby image, this is necessary to make `ruby`, `bundle` and other binaries available in the PATH.", +} + +func (BuildPostCommitSpec) SwaggerDoc() map[string]string { + return map_BuildPostCommitSpec +} + +var map_BuildRequest = map[string]string{ + "": "BuildRequest is the resource used to pass parameters to build generator\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "revision": "revision is the information from the source for a specific repo snapshot.", + "triggeredByImage": "triggeredByImage is the Image that triggered this build.", + "from": "from is the reference to the ImageStreamTag that triggered the build.", + "binary": "binary indicates a request to build from a binary provided to the builder", + "lastVersion": "lastVersion (optional) is the LastVersion of the BuildConfig that was used to generate the build. If the BuildConfig in the generator doesn't match, a build will not be generated.", + "env": "env contains additional environment variables you want to pass into a builder container.", + "triggeredBy": "triggeredBy describes which triggers started the most recent update to the build configuration and contains information about those triggers.", + "dockerStrategyOptions": "DockerStrategyOptions contains additional docker-strategy specific options for the build", + "sourceStrategyOptions": "SourceStrategyOptions contains additional source-strategy specific options for the build", +} + +func (BuildRequest) SwaggerDoc() map[string]string { + return map_BuildRequest +} + +var map_BuildSource = map[string]string{ + "": "BuildSource is the SCM used for the build.", + "type": "type of build input to accept", + "binary": "binary builds accept a binary as their input. The binary is generally assumed to be a tar, gzipped tar, or zip file depending on the strategy. For container image builds, this is the build context and an optional Dockerfile may be specified to override any Dockerfile in the build context. For Source builds, this is assumed to be an archive as described above. For Source and container image builds, if binary.asFile is set the build will receive a directory with a single file. contextDir may be used when an archive is provided. 
Custom builds will receive this binary as input on STDIN.", + "dockerfile": "dockerfile is the raw contents of a Dockerfile which should be built. When this option is specified, the FROM may be modified based on your strategy base image and additional ENV stanzas from your strategy environment will be added after the FROM, but before the rest of your Dockerfile stanzas. The Dockerfile source type may be used with other options like git - in those cases the Git repo will have any innate Dockerfile replaced in the context dir.", + "git": "git contains optional information about git build source", + "images": "images describes a set of images to be used to provide source for the build", + "contextDir": "contextDir specifies the sub-directory where the source code for the application exists. This allows sources to be built in a directory other than the root of the repository.", + "sourceSecret": "sourceSecret is the name of a Secret that would be used for setting up the authentication for cloning a private repository. The secret contains valid credentials for the remote repository, where the data's key represents the authentication method to be used and value is the base64 encoded credentials. Supported auth methods are: ssh-privatekey.", + "secrets": "secrets represents a list of secrets and their destinations that will be used only for the build.", + "configMaps": "configMaps represents a list of configMaps and their destinations that will be used for the build.", +} + +func (BuildSource) SwaggerDoc() map[string]string { + return map_BuildSource +} + +var map_BuildSpec = map[string]string{ + "": "BuildSpec has the information to represent a build and also additional information about a build", + "triggeredBy": "triggeredBy describes which triggers started the most recent update to the build configuration and contains information about those triggers.", +} + +func (BuildSpec) SwaggerDoc() map[string]string { + return map_BuildSpec +} + +var map_BuildStatus = map[string]string{ + "": "BuildStatus contains the status of a build", + "phase": "phase is the point in the build lifecycle. Possible values are \"New\", \"Pending\", \"Running\", \"Complete\", \"Failed\", \"Error\", and \"Cancelled\".", + "cancelled": "cancelled describes if a cancel event was triggered for the build.", + "reason": "reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", + "message": "message is a human-readable message indicating details about why the build has this status.", + "startTimestamp": "startTimestamp is a timestamp representing the server time when this Build started running in a Pod. It is represented in RFC3339 form and is in UTC.", + "completionTimestamp": "completionTimestamp is a timestamp representing the server time when this Build was finished, whether that build failed or succeeded. It reflects the time at which the Pod running the Build terminated. It is represented in RFC3339 form and is in UTC.", + "duration": "duration contains time.Duration object describing build time.", + "outputDockerImageReference": "outputDockerImageReference contains a reference to the container image that will be built by this build. 
Its value is computed from Build.Spec.Output.To, and should include the registry address, so that it can be used to push and pull the image.", + "config": "config is an ObjectReference to the BuildConfig this Build is based on.", + "output": "output describes the container image the build has produced.", + "stages": "stages contains details about each stage that occurs during the build including start time, duration (in milliseconds), and the steps that occurred within each stage.", + "logSnippet": "logSnippet is the last few lines of the build log. This value is only set for builds that failed.", + "conditions": "Conditions represents the latest available observations of a build's current state.", +} + +func (BuildStatus) SwaggerDoc() map[string]string { + return map_BuildStatus +} + +var map_BuildStatusOutput = map[string]string{ + "": "BuildStatusOutput contains the status of the built image.", + "to": "to describes the status of the built image being pushed to a registry.", +} + +func (BuildStatusOutput) SwaggerDoc() map[string]string { + return map_BuildStatusOutput +} + +var map_BuildStatusOutputTo = map[string]string{ + "": "BuildStatusOutputTo describes the status of the built image with regards to image registry to which it was supposed to be pushed.", + "imageDigest": "imageDigest is the digest of the built container image. The digest uniquely identifies the image in the registry to which it was pushed.\n\nPlease note that this field may not always be set even if the push completes successfully - e.g. when the registry returns no digest or returns it in a format that the builder doesn't understand.", +} + +func (BuildStatusOutputTo) SwaggerDoc() map[string]string { + return map_BuildStatusOutputTo +} + +var map_BuildStrategy = map[string]string{ + "": "BuildStrategy contains the details of how to perform a build.", + "type": "type is the kind of build strategy.", + "dockerStrategy": "dockerStrategy holds the parameters to the container image build strategy.", + "sourceStrategy": "sourceStrategy holds the parameters to the Source build strategy.", + "customStrategy": "customStrategy holds the parameters to the Custom build strategy", + "jenkinsPipelineStrategy": "JenkinsPipelineStrategy holds the parameters to the Jenkins Pipeline build strategy. Deprecated: use OpenShift Pipelines", +} + +func (BuildStrategy) SwaggerDoc() map[string]string { + return map_BuildStrategy +} + +var map_BuildTriggerCause = map[string]string{ + "": "BuildTriggerCause holds information about a triggered build. It is used for displaying build trigger data for each build and build configuration in oc describe. It is also used to describe which triggers led to the most recent update in the build configuration.", + "message": "message is used to store a human readable message for why the build was triggered. 
E.g.: \"Manually triggered by user\", \"Configuration change\",etc.", + "genericWebHook": "genericWebHook holds data about a builds generic webhook trigger.", + "githubWebHook": "gitHubWebHook represents data for a GitHub webhook that fired a specific build.", + "imageChangeBuild": "imageChangeBuild stores information about an imagechange event that triggered a new build.", + "gitlabWebHook": "GitLabWebHook represents data for a GitLab webhook that fired a specific build.", + "bitbucketWebHook": "BitbucketWebHook represents data for a Bitbucket webhook that fired a specific build.", +} + +func (BuildTriggerCause) SwaggerDoc() map[string]string { + return map_BuildTriggerCause +} + +var map_BuildTriggerPolicy = map[string]string{ + "": "BuildTriggerPolicy describes a policy for a single trigger that results in a new Build.", + "type": "type is the type of build trigger. Valid values:\n\n- GitHub GitHubWebHookBuildTriggerType represents a trigger that launches builds on GitHub webhook invocations\n\n- Generic GenericWebHookBuildTriggerType represents a trigger that launches builds on generic webhook invocations\n\n- GitLab GitLabWebHookBuildTriggerType represents a trigger that launches builds on GitLab webhook invocations\n\n- Bitbucket BitbucketWebHookBuildTriggerType represents a trigger that launches builds on Bitbucket webhook invocations\n\n- ImageChange ImageChangeBuildTriggerType represents a trigger that launches builds on availability of a new version of an image\n\n- ConfigChange ConfigChangeBuildTriggerType will trigger a build on an initial build config creation WARNING: In the future the behavior will change to trigger a build on any config change", + "github": "github contains the parameters for a GitHub webhook type of trigger", + "generic": "generic contains the parameters for a Generic webhook type of trigger", + "imageChange": "imageChange contains parameters for an ImageChange type of trigger", + "gitlab": "GitLabWebHook contains the parameters for a GitLab webhook type of trigger", + "bitbucket": "BitbucketWebHook contains the parameters for a Bitbucket webhook type of trigger", +} + +func (BuildTriggerPolicy) SwaggerDoc() map[string]string { + return map_BuildTriggerPolicy +} + +var map_BuildVolume = map[string]string{ + "": "BuildVolume describes a volume that is made available to build pods, such that it can be mounted into buildah's runtime environment. Only a subset of Kubernetes Volume sources are supported.", + "name": "name is a unique identifier for this BuildVolume. It must conform to the Kubernetes DNS label standard and be unique within the pod. Names that collide with those added by the build controller will result in a failed build with an error message detailing which name caused the error. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "source": "source represents the location and type of the mounted volume.", + "mounts": "mounts represents the location of the volume in the image build container", +} + +func (BuildVolume) SwaggerDoc() map[string]string { + return map_BuildVolume +} + +var map_BuildVolumeMount = map[string]string{ + "": "BuildVolumeMount describes the mounting of a Volume within buildah's runtime environment.", + "destinationPath": "destinationPath is the path within the buildah runtime environment at which the volume should be mounted. The transient mount within the build image and the backing volume will both be mounted read only. Must be an absolute path, must not contain '..' 
or ':', and must not collide with a destination path generated by the builder process. Paths that collide with those added by the build controller will result in a failed build with an error message detailing which path caused the error.", +} + +func (BuildVolumeMount) SwaggerDoc() map[string]string { + return map_BuildVolumeMount +} + +var map_BuildVolumeSource = map[string]string{ + "": "BuildVolumeSource represents the source of a volume to mount. Only one of its supported types may be specified at any given time.", + "type": "type is the BuildVolumeSourceType for the volume source. Type must match the populated volume source. Valid types are: Secret, ConfigMap", + "secret": "secret represents a Secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + "configMap": "configMap represents a ConfigMap that should populate this volume", + "csi": "csi represents ephemeral storage provided by external CSI drivers which support this capability", +} + +func (BuildVolumeSource) SwaggerDoc() map[string]string { + return map_BuildVolumeSource +} + +var map_CommonSpec = map[string]string{ + "": "CommonSpec encapsulates all the inputs necessary to represent a build.", + "serviceAccount": "serviceAccount is the name of the ServiceAccount to use to run the pod created by this build. The pod will be allowed to use secrets referenced by the ServiceAccount", + "source": "source describes the SCM in use.", + "revision": "revision is the information from the source for a specific repo snapshot. This is optional.", + "strategy": "strategy defines how to perform a build.", + "output": "output describes the container image the Strategy should produce.", + "resources": "resources computes resource requirements to execute the build.", + "postCommit": "postCommit is a build hook executed after the build output image is committed, before it is pushed to a registry.", + "completionDeadlineSeconds": "completionDeadlineSeconds is an optional duration in seconds, counted from the time when a build pod gets scheduled in the system, that the build may be active on a node before the system actively tries to terminate the build; value must be a positive integer", + "nodeSelector": "nodeSelector is a selector which must be true for the build pod to fit on a node. If nil, it can be overridden by default build nodeselector values for the cluster. If set to an empty map or a map with any values, default build nodeselector values are ignored.", + "mountTrustedCA": "mountTrustedCA bind mounts the cluster's trusted certificate authorities, as defined in the cluster's proxy configuration, into the build. 
This lets processes within a build trust components signed by custom PKI certificate authorities, such as private artifact repositories and HTTPS proxies.\n\nWhen this field is set to true, the contents of `/etc/pki/ca-trust` within the build are managed by the build container, and any changes to this directory or its subdirectories (for example - within a Dockerfile `RUN` instruction) are not persisted in the build's output image.", +} + +func (CommonSpec) SwaggerDoc() map[string]string { + return map_CommonSpec +} + +var map_CommonWebHookCause = map[string]string{ + "": "CommonWebHookCause factors out the identical format of these webhook causes into a struct so we can share it in the specific causes; it is too late for GitHub and Generic but we can leverage this pattern with GitLab and Bitbucket.", + "revision": "Revision is the git source revision information of the trigger.", + "secret": "Secret is the obfuscated webhook secret that triggered a build.", +} + +func (CommonWebHookCause) SwaggerDoc() map[string]string { + return map_CommonWebHookCause +} + +var map_ConfigMapBuildSource = map[string]string{ + "": "ConfigMapBuildSource describes a configmap and its destination directory that will be used only at the build time. The content of the configmap referenced here will be copied into the destination directory instead of mounting.", + "configMap": "configMap is a reference to an existing configmap that you want to use in your build.", + "destinationDir": "destinationDir is the directory where the files from the configmap should be available for the build time. For the Source build strategy, these will be injected into a container where the assemble script runs. For the container image build strategy, these will be copied into the build directory, where the Dockerfile is located, so users can ADD or COPY them during container image build.", +} + +func (ConfigMapBuildSource) SwaggerDoc() map[string]string { + return map_ConfigMapBuildSource +} + +var map_CustomBuildStrategy = map[string]string{ + "": "CustomBuildStrategy defines input parameters specific to Custom build.", + "from": "from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which the container image should be pulled", + "pullSecret": "pullSecret is the name of a Secret that would be used for setting up the authentication for pulling the container images from the private Docker registries", + "env": "env contains additional environment variables you want to pass into a builder container.", + "exposeDockerSocket": "exposeDockerSocket will allow running Docker commands (and build container images) from inside the container.", + "forcePull": "forcePull describes if the controller should configure the build pod to always pull the images for the builder or only pull if it is not present locally", + "secrets": "secrets is a list of additional secrets that will be included in the build pod", + "buildAPIVersion": "buildAPIVersion is the requested API version for the Build object serialized and passed to the custom builder", +} + +func (CustomBuildStrategy) SwaggerDoc() map[string]string { + return map_CustomBuildStrategy +} + +var map_DockerBuildStrategy = map[string]string{ + "": "DockerBuildStrategy defines input parameters specific to container image build.", + "from": "from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage which overrides the FROM image in the Dockerfile for the build. 
If the Dockerfile uses multi-stage builds, this will replace the image in the last FROM directive of the file.", + "pullSecret": "pullSecret is the name of a Secret that would be used for setting up the authentication for pulling the container images from the private Docker registries", + "noCache": "noCache if set to true indicates that the container image build must be executed with the --no-cache=true flag", + "env": "env contains additional environment variables you want to pass into a builder container.", + "forcePull": "forcePull describes if the builder should pull the images from the registry prior to building.", + "dockerfilePath": "dockerfilePath is the path of the Dockerfile that will be used to build the container image, relative to the root of the context (contextDir). Defaults to `Dockerfile` if unset.", + "buildArgs": "buildArgs contains build arguments that will be resolved in the Dockerfile. See https://docs.docker.com/engine/reference/builder/#/arg for more details. NOTE: Only the 'name' and 'value' fields are supported. Any settings on the 'valueFrom' field are ignored.", + "imageOptimizationPolicy": "imageOptimizationPolicy describes what optimizations the system can use when building images to reduce the final size or time spent building the image. The default policy is 'None' which means the final build image will be equivalent to an image created by the container image build API. The experimental policy 'SkipLayers' will avoid committing new layers in between each image step, and will fail if the Dockerfile cannot provide compatibility with the 'None' policy. An additional experimental policy 'SkipLayersAndWarn' is the same as 'SkipLayers' but simply warns if compatibility cannot be preserved.", + "volumes": "volumes is a list of input volumes that can be mounted into the build's runtime environment. Only a subset of Kubernetes Volume sources are supported by builds. More info: https://kubernetes.io/docs/concepts/storage/volumes", +} + +func (DockerBuildStrategy) SwaggerDoc() map[string]string { + return map_DockerBuildStrategy +} + +var map_DockerStrategyOptions = map[string]string{ + "": "DockerStrategyOptions contains extra strategy options for container image builds", + "buildArgs": "Args contains any build arguments that are to be passed to Docker. See https://docs.docker.com/engine/reference/builder/#/arg for more details", + "noCache": "noCache overrides the docker-strategy noCache option in the build config", +} + +func (DockerStrategyOptions) SwaggerDoc() map[string]string { + return map_DockerStrategyOptions +} + +var map_GenericWebHookCause = map[string]string{ + "": "GenericWebHookCause holds information about a generic WebHook that triggered a build.", + "revision": "revision is an optional field that stores the git source revision information of the generic webhook trigger when it is available.", + "secret": "secret is the obfuscated webhook secret that triggered a build.", +} + +func (GenericWebHookCause) SwaggerDoc() map[string]string { + return map_GenericWebHookCause +} + +var map_GenericWebHookEvent = map[string]string{ + "": "GenericWebHookEvent is the payload expected for a generic webhook post", + "type": "type is the type of source repository", + "git": "git is the git information if the Type is BuildSourceGit", + "env": "env contains additional environment variables you want to pass into a builder container.
ValueFrom is not supported.", + "dockerStrategyOptions": "DockerStrategyOptions contains additional docker-strategy specific options for the build", +} + +func (GenericWebHookEvent) SwaggerDoc() map[string]string { + return map_GenericWebHookEvent +} + +var map_GitBuildSource = map[string]string{ + "": "GitBuildSource defines the parameters of a Git SCM", + "uri": "uri points to the source that will be built. The structure of the source will depend on the type of build to run", + "ref": "ref is the branch/tag/ref to build.", +} + +func (GitBuildSource) SwaggerDoc() map[string]string { + return map_GitBuildSource +} + +var map_GitHubWebHookCause = map[string]string{ + "": "GitHubWebHookCause has information about a GitHub webhook that triggered a build.", + "revision": "revision is the git revision information of the trigger.", + "secret": "secret is the obfuscated webhook secret that triggered a build.", +} + +func (GitHubWebHookCause) SwaggerDoc() map[string]string { + return map_GitHubWebHookCause +} + +var map_GitInfo = map[string]string{ + "": "GitInfo is the aggregated git information for a generic webhook post", + "refs": "Refs is a list of GitRefs for the provided repo - generally sent when used from a post-receive hook. This field is optional and is used when sending multiple refs", +} + +func (GitInfo) SwaggerDoc() map[string]string { + return map_GitInfo +} + +var map_GitLabWebHookCause = map[string]string{ + "": "GitLabWebHookCause has information about a GitLab webhook that triggered a build.", +} + +func (GitLabWebHookCause) SwaggerDoc() map[string]string { + return map_GitLabWebHookCause +} + +var map_GitRefInfo = map[string]string{ + "": "GitRefInfo is a single ref", +} + +func (GitRefInfo) SwaggerDoc() map[string]string { + return map_GitRefInfo +} + +var map_GitSourceRevision = map[string]string{ + "": "GitSourceRevision is the commit information from a git source for a build", + "commit": "commit is the commit hash identifying a specific commit", + "author": "author is the author of a specific commit", + "committer": "committer is the committer of a specific commit", + "message": "message is the description of a specific commit", +} + +func (GitSourceRevision) SwaggerDoc() map[string]string { + return map_GitSourceRevision +} + +var map_ImageChangeCause = map[string]string{ + "": "ImageChangeCause contains information about the image that triggered a build", + "imageID": "imageID is the ID of the image that triggered a new build.", + "fromRef": "fromRef contains detailed information about an image that triggered a build.", +} + +func (ImageChangeCause) SwaggerDoc() map[string]string { + return map_ImageChangeCause +} + +var map_ImageChangeTrigger = map[string]string{ + "": "ImageChangeTrigger allows builds to be triggered when an ImageStream changes", + "lastTriggeredImageID": "lastTriggeredImageID is used internally by the ImageChangeController to save the last used image ID for a build. This field is deprecated and will be removed in a future release. Deprecated", + "from": "from is a reference to an ImageStreamTag that will trigger a build when updated. It is optional. If no From is specified, the From image from the build strategy will be used. Only one ImageChangeTrigger with an empty From reference is allowed in a build configuration.", + "paused": "paused is true if this trigger is temporarily disabled.
Optional.", +} + +func (ImageChangeTrigger) SwaggerDoc() map[string]string { + return map_ImageChangeTrigger +} + +var map_ImageChangeTriggerStatus = map[string]string{ + "": "ImageChangeTriggerStatus tracks the latest resolved status of the associated ImageChangeTrigger policy specified in the BuildConfigSpec.Triggers struct.", + "lastTriggeredImageID": "lastTriggeredImageID represents the sha/id of the ImageStreamTag when a Build for this BuildConfig was started. The lastTriggeredImageID is updated each time a Build for this BuildConfig is started, even if this ImageStreamTag is not the reason the Build is started.", + "from": "from is the ImageStreamTag that is the source of the trigger.", + "lastTriggerTime": "lastTriggerTime is the last time this particular ImageStreamTag triggered a Build to start. This field is only updated when this trigger specifically started a Build.", +} + +func (ImageChangeTriggerStatus) SwaggerDoc() map[string]string { + return map_ImageChangeTriggerStatus +} + +var map_ImageLabel = map[string]string{ + "": "ImageLabel represents a label applied to the resulting image.", + "name": "name defines the name of the label. It must have non-zero length.", + "value": "value defines the literal value of the label.", +} + +func (ImageLabel) SwaggerDoc() map[string]string { + return map_ImageLabel +} + +var map_ImageSource = map[string]string{ + "": "ImageSource is used to describe build source that will be extracted from an image or used during a multi stage build. A reference of type ImageStreamTag, ImageStreamImage or DockerImage may be used. A pull secret can be specified to pull the image from an external registry or override the default service account secret if pulling from the internal registry. Image sources can either be used to extract content from an image and place it into the build context along with the repository source, or used directly during a multi-stage container image build to allow content to be copied without overwriting the contents of the repository source (see the 'paths' and 'as' fields).", + "from": "from is a reference to an ImageStreamTag, ImageStreamImage, or DockerImage to copy source from.", + "as": "A list of image names that this source will be used in place of during a multi-stage container image build. For instance, a Dockerfile that uses \"COPY --from=nginx:latest\" will first check for an image source that has \"nginx:latest\" in this field before attempting to pull directly. If the Dockerfile does not reference an image source it is ignored. This field and paths may both be set, in which case the contents will be used twice.", + "paths": "paths is a list of source and destination paths to copy from the image. This content will be copied into the build context prior to starting the build. If no paths are set, the build context will not be altered.", + "pullSecret": "pullSecret is a reference to a secret to be used to pull the image from a registry If the image is pulled from the OpenShift registry, this field does not need to be set.", +} + +func (ImageSource) SwaggerDoc() map[string]string { + return map_ImageSource +} + +var map_ImageSourcePath = map[string]string{ + "": "ImageSourcePath describes a path to be copied from a source image and its destination within the build directory.", + "sourcePath": "sourcePath is the absolute path of the file or directory inside the image to copy to the build directory. If the source path ends in /. 
then the content of the directory will be copied, but the directory itself will not be created at the destination.", + "destinationDir": "destinationDir is the relative directory within the build directory where files copied from the image are placed.", +} + +func (ImageSourcePath) SwaggerDoc() map[string]string { + return map_ImageSourcePath +} + +var map_ImageStreamTagReference = map[string]string{ + "": "ImageStreamTagReference references the ImageStreamTag in an image change trigger by namespace and name.", + "namespace": "namespace is the namespace where the ImageStreamTag for an ImageChangeTrigger is located", + "name": "name is the name of the ImageStreamTag for an ImageChangeTrigger", +} + +func (ImageStreamTagReference) SwaggerDoc() map[string]string { + return map_ImageStreamTagReference +} + +var map_JenkinsPipelineBuildStrategy = map[string]string{ + "": "JenkinsPipelineBuildStrategy holds parameters specific to a Jenkins Pipeline build. Deprecated: use OpenShift Pipelines", + "jenkinsfilePath": "JenkinsfilePath is the optional path of the Jenkinsfile that will be used to configure the pipeline relative to the root of the context (contextDir). If neither JenkinsfilePath nor Jenkinsfile is specified, this defaults to Jenkinsfile in the root of the specified contextDir.", + "jenkinsfile": "Jenkinsfile defines the optional raw contents of a Jenkinsfile which defines a Jenkins pipeline build.", + "env": "env contains additional environment variables you want to pass into a build pipeline.", +} + +func (JenkinsPipelineBuildStrategy) SwaggerDoc() map[string]string { + return map_JenkinsPipelineBuildStrategy +} + +var map_ProxyConfig = map[string]string{ + "": "ProxyConfig defines what proxies to use for an operation", + "httpProxy": "httpProxy is a proxy used to reach the git repository over http", + "httpsProxy": "httpsProxy is a proxy used to reach the git repository over https", + "noProxy": "noProxy is the list of domains for which the proxy should not be used", +} + +func (ProxyConfig) SwaggerDoc() map[string]string { + return map_ProxyConfig +} + +var map_SecretBuildSource = map[string]string{ + "": "SecretBuildSource describes a secret and its destination directory that will be used only at the build time. The content of the secret referenced here will be copied into the destination directory instead of mounting.", + "secret": "secret is a reference to an existing secret that you want to use in your build.", + "destinationDir": "destinationDir is the directory where the files from the secret should be available for the build time. For the Source build strategy, these will be injected into a container where the assemble script runs. Later, when the script finishes, all files injected will be truncated to zero length.
For the container image build strategy, these will be copied into the build directory, where the Dockerfile is located, so users can ADD or COPY them during container image build.", +} + +func (SecretBuildSource) SwaggerDoc() map[string]string { + return map_SecretBuildSource +} + +var map_SecretLocalReference = map[string]string{ + "": "SecretLocalReference contains information that points to the local secret being used", + "name": "Name is the name of the resource in the same namespace being referenced", +} + +func (SecretLocalReference) SwaggerDoc() map[string]string { + return map_SecretLocalReference +} + +var map_SecretSpec = map[string]string{ + "": "SecretSpec specifies a secret to be included in a build pod and its corresponding mount point", + "secretSource": "secretSource is a reference to the secret", + "mountPath": "mountPath is the path at which to mount the secret", +} + +func (SecretSpec) SwaggerDoc() map[string]string { + return map_SecretSpec +} + +var map_SourceBuildStrategy = map[string]string{ + "": "SourceBuildStrategy defines input parameters specific to a Source build.", + "from": "from is a reference to a DockerImage, ImageStreamTag, or ImageStreamImage from which the container image should be pulled", + "pullSecret": "pullSecret is the name of a Secret that would be used for setting up the authentication for pulling the container images from the private Docker registries", + "env": "env contains additional environment variables you want to pass into a builder container.", + "scripts": "scripts is the location of Source scripts", + "incremental": "incremental flag forces the Source build to do incremental builds if true.", + "forcePull": "forcePull describes if the builder should pull the images from the registry prior to building.", + "volumes": "volumes is a list of input volumes that can be mounted into the build's runtime environment. Only a subset of Kubernetes Volume sources are supported by builds. More info: https://kubernetes.io/docs/concepts/storage/volumes", +} + +func (SourceBuildStrategy) SwaggerDoc() map[string]string { + return map_SourceBuildStrategy +} + +var map_SourceControlUser = map[string]string{ + "": "SourceControlUser defines the identity of a user of source control", + "name": "name of the source control user", + "email": "email of the source control user", +} + +func (SourceControlUser) SwaggerDoc() map[string]string { + return map_SourceControlUser +} + +var map_SourceRevision = map[string]string{ + "": "SourceRevision is the revision or commit information from the source for the build", + "type": "type of the build source, may be one of 'Source', 'Dockerfile', 'Binary', or 'Images'", + "git": "Git contains information about git-based build source", +} + +func (SourceRevision) SwaggerDoc() map[string]string { + return map_SourceRevision +} + +var map_SourceStrategyOptions = map[string]string{ + "": "SourceStrategyOptions contains extra strategy options for Source builds", + "incremental": "incremental overrides the source-strategy incremental option in the build config", +} + +func (SourceStrategyOptions) SwaggerDoc() map[string]string { + return map_SourceStrategyOptions +} + +var map_StageInfo = map[string]string{ + "": "StageInfo contains details about a build stage.", + "name": "name is a unique identifier for each build stage that occurs.", + "startTime": "startTime is a timestamp representing the server time when this Stage started.
It is represented in RFC3339 form and is in UTC.", + "durationMilliseconds": "durationMilliseconds identifies how long the stage took to complete in milliseconds. Note: the duration of a stage can exceed the sum of the duration of the steps within the stage as not all actions are accounted for in explicit build steps.", + "steps": "steps contains details about each step that occurs during a build stage including start time and duration in milliseconds.", +} + +func (StageInfo) SwaggerDoc() map[string]string { + return map_StageInfo +} + +var map_StepInfo = map[string]string{ + "": "StepInfo contains details about a build step.", + "name": "name is a unique identifier for each build step.", + "startTime": "startTime is a timestamp representing the server time when this Step started. It is represented in RFC3339 form and is in UTC.", + "durationMilliseconds": "durationMilliseconds identifies how long the step took to complete in milliseconds.", +} + +func (StepInfo) SwaggerDoc() map[string]string { + return map_StepInfo +} + +var map_WebHookTrigger = map[string]string{ + "": "WebHookTrigger is a trigger that gets invoked using a webhook type of post", + "secret": "secret used to validate requests. Deprecated: use SecretReference instead.", + "allowEnv": "allowEnv determines whether the webhook can set environment variables; can only be set to true for GenericWebHook.", + "secretReference": "secretReference is a reference to a secret in the same namespace, containing the value to be validated when the webhook is invoked. The secret being referenced must contain a key named \"WebHookSecretKey\", the value of which will be checked against the value supplied in the webhook invocation.", +} + +func (WebHookTrigger) SwaggerDoc() map[string]string { + return map_WebHookTrigger +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/cloudnetwork/OWNERS b/vendor/github.com/openshift/api/cloudnetwork/OWNERS new file mode 100644 index 000000000..0bc20628a --- /dev/null +++ b/vendor/github.com/openshift/api/cloudnetwork/OWNERS @@ -0,0 +1,6 @@ +reviewers: + - danwinship + - dcbw + - knobunc + - squeed + - abhat diff --git a/vendor/github.com/openshift/api/cloudnetwork/install.go b/vendor/github.com/openshift/api/cloudnetwork/install.go new file mode 100644 index 000000000..f839ebf00 --- /dev/null +++ b/vendor/github.com/openshift/api/cloudnetwork/install.go @@ -0,0 +1,26 @@ +package cloudnetwork + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + cloudnetworkv1 "github.com/openshift/api/cloudnetwork/v1" +) + +const ( + GroupName = "cloud.network.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(cloudnetworkv1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/cloudnetwork/v1/001-cloudprivateipconfig.crd.yaml b/vendor/github.com/openshift/api/cloudnetwork/v1/001-cloudprivateipconfig.crd.yaml new file mode 100644 index 000000000..d4e9e0b88 --- /dev/null +++ b/vendor/github.com/openshift/api/cloudnetwork/v1/001-cloudprivateipconfig.crd.yaml @@ -0,0 +1,107 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: +
api-approved.openshift.io: https://github.com/openshift/api/pull/859 + name: cloudprivateipconfigs.cloud.network.openshift.io +spec: + group: cloud.network.openshift.io + names: + kind: CloudPrivateIPConfig + listKind: CloudPrivateIPConfigList + plural: cloudprivateipconfigs + singular: cloudprivateipconfig + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "CloudPrivateIPConfig performs an assignment of a private IP address to the primary NIC associated with cloud VMs. This is done by specifying the IP and Kubernetes node which the IP should be assigned to. This CRD is intended to be used by the network plugin which manages the cluster network. The spec side represents the desired state requested by the network plugin, and the status side represents the current state that this CRD's controller has executed. No users will have permission to modify it, and if a cluster-admin decides to edit it for some reason, their changes will be overwritten the next time the network plugin reconciles the object. Note: the CR's name must specify the requested private IP address (can be IPv4 or IPv6). \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + properties: + name: + anyOf: + - format: ipv4 + - format: ipv6 + type: string + type: object + spec: + description: spec is the definition of the desired private IP request. + properties: + node: + description: 'node is the node name, as specified by the Kubernetes field: node.metadata.name' + type: string + type: object + status: + description: status is the observed status of the desired private IP request. Read-only. + properties: + conditions: + description: condition is the assignment condition of the private IP and its status + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. 
+ format: date-time + type: string + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + node: + description: 'node is the node name, as specified by the Kubernetes field: node.metadata.name' + type: string + required: + - conditions + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/api/cloudnetwork/v1/001-cloudprivateipconfig.crd.yaml-patch b/vendor/github.com/openshift/api/cloudnetwork/v1/001-cloudprivateipconfig.crd.yaml-patch new file mode 100644 index 000000000..1239c0543 --- /dev/null +++ b/vendor/github.com/openshift/api/cloudnetwork/v1/001-cloudprivateipconfig.crd.yaml-patch @@ -0,0 +1,10 @@ +- op: add + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/metadata + value: + type: object + properties: + name: + type: string + anyOf: + - format: ipv4 + - format: ipv6 diff --git a/vendor/github.com/openshift/api/cloudnetwork/v1/Makefile b/vendor/github.com/openshift/api/cloudnetwork/v1/Makefile new file mode 100644 index 000000000..ef9799eaf --- /dev/null +++ b/vendor/github.com/openshift/api/cloudnetwork/v1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="cloud.network.openshift.io/v1" diff --git a/vendor/github.com/openshift/api/cloudnetwork/v1/doc.go b/vendor/github.com/openshift/api/cloudnetwork/v1/doc.go new file mode 100644 index 000000000..1d495ee24 --- /dev/null +++ b/vendor/github.com/openshift/api/cloudnetwork/v1/doc.go @@ -0,0 +1,5 @@ +// Package v1 contains API Schema definitions for the cloud network v1 API group +// +k8s:deepcopy-gen=package,register +// +groupName=cloud.network.openshift.io +// +kubebuilder:validation:Optional +package v1 diff --git 
a/vendor/github.com/openshift/api/cloudnetwork/v1/generated.pb.go b/vendor/github.com/openshift/api/cloudnetwork/v1/generated.pb.go new file mode 100644 index 000000000..9635f70d0 --- /dev/null +++ b/vendor/github.com/openshift/api/cloudnetwork/v1/generated.pb.go @@ -0,0 +1,1045 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/cloudnetwork/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *CloudPrivateIPConfig) Reset() { *m = CloudPrivateIPConfig{} } +func (*CloudPrivateIPConfig) ProtoMessage() {} +func (*CloudPrivateIPConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_454253a7ab01c6d0, []int{0} +} +func (m *CloudPrivateIPConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CloudPrivateIPConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CloudPrivateIPConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloudPrivateIPConfig.Merge(m, src) +} +func (m *CloudPrivateIPConfig) XXX_Size() int { + return m.Size() +} +func (m *CloudPrivateIPConfig) XXX_DiscardUnknown() { + xxx_messageInfo_CloudPrivateIPConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_CloudPrivateIPConfig proto.InternalMessageInfo + +func (m *CloudPrivateIPConfigList) Reset() { *m = CloudPrivateIPConfigList{} } +func (*CloudPrivateIPConfigList) ProtoMessage() {} +func (*CloudPrivateIPConfigList) Descriptor() ([]byte, []int) { + return fileDescriptor_454253a7ab01c6d0, []int{1} +} +func (m *CloudPrivateIPConfigList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CloudPrivateIPConfigList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CloudPrivateIPConfigList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloudPrivateIPConfigList.Merge(m, src) +} +func (m *CloudPrivateIPConfigList) XXX_Size() int { + return m.Size() +} +func (m *CloudPrivateIPConfigList) XXX_DiscardUnknown() { + xxx_messageInfo_CloudPrivateIPConfigList.DiscardUnknown(m) +} + +var xxx_messageInfo_CloudPrivateIPConfigList proto.InternalMessageInfo + +func (m *CloudPrivateIPConfigSpec) Reset() { *m = CloudPrivateIPConfigSpec{} } +func (*CloudPrivateIPConfigSpec) ProtoMessage() {} +func (*CloudPrivateIPConfigSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_454253a7ab01c6d0, []int{2} +} +func (m *CloudPrivateIPConfigSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CloudPrivateIPConfigSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m 
*CloudPrivateIPConfigSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloudPrivateIPConfigSpec.Merge(m, src) +} +func (m *CloudPrivateIPConfigSpec) XXX_Size() int { + return m.Size() +} +func (m *CloudPrivateIPConfigSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CloudPrivateIPConfigSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CloudPrivateIPConfigSpec proto.InternalMessageInfo + +func (m *CloudPrivateIPConfigStatus) Reset() { *m = CloudPrivateIPConfigStatus{} } +func (*CloudPrivateIPConfigStatus) ProtoMessage() {} +func (*CloudPrivateIPConfigStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_454253a7ab01c6d0, []int{3} +} +func (m *CloudPrivateIPConfigStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CloudPrivateIPConfigStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CloudPrivateIPConfigStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloudPrivateIPConfigStatus.Merge(m, src) +} +func (m *CloudPrivateIPConfigStatus) XXX_Size() int { + return m.Size() +} +func (m *CloudPrivateIPConfigStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CloudPrivateIPConfigStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_CloudPrivateIPConfigStatus proto.InternalMessageInfo + +func init() { + proto.RegisterType((*CloudPrivateIPConfig)(nil), "github.com.openshift.api.cloudnetwork.v1.CloudPrivateIPConfig") + proto.RegisterType((*CloudPrivateIPConfigList)(nil), "github.com.openshift.api.cloudnetwork.v1.CloudPrivateIPConfigList") + proto.RegisterType((*CloudPrivateIPConfigSpec)(nil), "github.com.openshift.api.cloudnetwork.v1.CloudPrivateIPConfigSpec") + proto.RegisterType((*CloudPrivateIPConfigStatus)(nil), "github.com.openshift.api.cloudnetwork.v1.CloudPrivateIPConfigStatus") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/cloudnetwork/v1/generated.proto", fileDescriptor_454253a7ab01c6d0) +} + +var fileDescriptor_454253a7ab01c6d0 = []byte{ + // 483 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x94, 0xc1, 0x6e, 0xd3, 0x30, + 0x18, 0xc7, 0xe3, 0xae, 0x9b, 0x86, 0x07, 0x08, 0x45, 0x1c, 0xa2, 0x1e, 0xbc, 0xaa, 0xa7, 0x5e, + 0xb0, 0xe9, 0x84, 0xd0, 0x0e, 0x88, 0x43, 0xca, 0x65, 0x12, 0x8c, 0x29, 0xdc, 0x10, 0x07, 0x5c, + 0xc7, 0x4d, 0x4d, 0x17, 0x3b, 0x8a, 0x9d, 0x22, 0x6e, 0x3c, 0x02, 0xef, 0xc0, 0xcb, 0xf4, 0xc0, + 0x61, 0xc7, 0x5d, 0x98, 0x68, 0x78, 0x11, 0x64, 0x37, 0x6d, 0x23, 0xd6, 0x69, 0x91, 0x7a, 0xcb, + 0xf7, 0x25, 0xff, 0xff, 0xef, 0xfb, 0xfe, 0x8e, 0x0c, 0x4f, 0x13, 0x61, 0x26, 0xc5, 0x08, 0x33, + 0x95, 0x12, 0x95, 0x71, 0xa9, 0x27, 0x62, 0x6c, 0x08, 0xcd, 0x04, 0x61, 0x97, 0xaa, 0x88, 0x25, + 0x37, 0x5f, 0x55, 0x3e, 0x25, 0xb3, 0x01, 0x49, 0xb8, 0xe4, 0x39, 0x35, 0x3c, 0xc6, 0x59, 0xae, + 0x8c, 0xf2, 0xfb, 0x1b, 0x25, 0x5e, 0x2b, 0x31, 0xcd, 0x04, 0xae, 0x2b, 0xf1, 0x6c, 0xd0, 0x79, + 0x56, 0x63, 0x24, 0x2a, 0x51, 0xc4, 0x19, 0x8c, 0x8a, 0xb1, 0xab, 0x5c, 0xe1, 0x9e, 0x96, 0xc6, + 0x9d, 0x17, 0xd3, 0x53, 0x8d, 0x85, 0xb2, 0x43, 0xa4, 0x94, 0x4d, 0x84, 0xe4, 0xf9, 0x37, 0x92, + 0x4d, 0x13, 0xdb, 0xd0, 0x24, 0xe5, 0x86, 0x6e, 0x19, 0xa7, 0x43, 0xee, 0x52, 0xe5, 0x85, 0x34, + 0x22, 0xe5, 0xb7, 0x04, 0x2f, 0xef, 0x13, 0x68, 0x36, 0xe1, 0x29, 0xfd, 0x5f, 0xd7, 0xfb, 0xd5, + 0x82, 0x4f, 0x87, 0x76, 0xc3, 0x8b, 0x5c, 0xcc, 0xa8, 0xe1, 0x67, 0x17, 0x43, 0x25, 0xc7, 0x22, + 0xf1, 0x3f, 0xc3, 0x43, 0x3b, 0x5c, 0x4c, 0x0d, 0x0d, 0x40, 
0x17, 0xf4, 0x8f, 0x4e, 0x9e, 0xe3, + 0x25, 0x03, 0xd7, 0x19, 0x38, 0x9b, 0x26, 0xb6, 0xa1, 0xb1, 0xfd, 0x1a, 0xcf, 0x06, 0xf8, 0xfd, + 0xe8, 0x0b, 0x67, 0xe6, 0x1d, 0x37, 0x34, 0xf4, 0xe7, 0x37, 0xc7, 0x5e, 0x79, 0x73, 0x0c, 0x37, + 0xbd, 0x68, 0xed, 0xea, 0xc7, 0xb0, 0xad, 0x33, 0xce, 0x82, 0x96, 0x73, 0x0f, 0x71, 0xd3, 0x13, + 0xc0, 0xdb, 0xe6, 0xfd, 0x90, 0x71, 0x16, 0x3e, 0xac, 0x78, 0x6d, 0x5b, 0x45, 0xce, 0xdd, 0xbf, + 0x84, 0x07, 0xda, 0x50, 0x53, 0xe8, 0x60, 0xcf, 0x71, 0xde, 0xec, 0xc8, 0x71, 0x5e, 0xe1, 0xe3, + 0x8a, 0x74, 0xb0, 0xac, 0xa3, 0x8a, 0xd1, 0xfb, 0x0d, 0x60, 0xb0, 0x4d, 0xf6, 0x56, 0x68, 0xe3, + 0x7f, 0xba, 0x15, 0x29, 0x6e, 0x16, 0xa9, 0x55, 0xbb, 0x40, 0x9f, 0x54, 0xd8, 0xc3, 0x55, 0xa7, + 0x16, 0x27, 0x83, 0xfb, 0xc2, 0xf0, 0x54, 0x07, 0xad, 0xee, 0x5e, 0xff, 0xe8, 0xe4, 0xf5, 0x6e, + 0x7b, 0x86, 0x8f, 0x2a, 0xd4, 0xfe, 0x99, 0x35, 0x8d, 0x96, 0xde, 0xbd, 0x57, 0xdb, 0xd7, 0xb3, + 0x79, 0xfb, 0x5d, 0xd8, 0x96, 0x2a, 0xe6, 0x6e, 0xb5, 0x07, 0x9b, 0xb3, 0x38, 0x57, 0x31, 0x8f, + 0xdc, 0x9b, 0xde, 0x4f, 0x00, 0x3b, 0x77, 0x87, 0x7a, 0xbf, 0x81, 0xcf, 0x20, 0x64, 0x4a, 0xc6, + 0xc2, 0x08, 0x25, 0x57, 0x8b, 0x92, 0x66, 0x19, 0x0e, 0x57, 0xba, 0xcd, 0x5f, 0xb9, 0x6e, 0xe9, + 0xa8, 0x66, 0x1b, 0x9e, 0xcf, 0x17, 0xc8, 0xbb, 0x5a, 0x20, 0xef, 0x7a, 0x81, 0xbc, 0xef, 0x25, + 0x02, 0xf3, 0x12, 0x81, 0xab, 0x12, 0x81, 0xeb, 0x12, 0x81, 0x3f, 0x25, 0x02, 0x3f, 0xfe, 0x22, + 0xef, 0x63, 0xbf, 0xe9, 0x55, 0xf3, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xc8, 0xf0, 0xc5, 0x6e, 0x95, + 0x04, 0x00, 0x00, +} + +func (m *CloudPrivateIPConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CloudPrivateIPConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CloudPrivateIPConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CloudPrivateIPConfigList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CloudPrivateIPConfigList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CloudPrivateIPConfigList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CloudPrivateIPConfigSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CloudPrivateIPConfigSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CloudPrivateIPConfigSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Node) + copy(dAtA[i:], m.Node) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Node))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CloudPrivateIPConfigStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CloudPrivateIPConfigStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CloudPrivateIPConfigStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Node) + copy(dAtA[i:], m.Node) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Node))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CloudPrivateIPConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CloudPrivateIPConfigList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CloudPrivateIPConfigSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Node) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CloudPrivateIPConfigStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Node) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CloudPrivateIPConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CloudPrivateIPConfig{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + 
strings.Replace(strings.Replace(this.Spec.String(), "CloudPrivateIPConfigSpec", "CloudPrivateIPConfigSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CloudPrivateIPConfigStatus", "CloudPrivateIPConfigStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CloudPrivateIPConfigList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]CloudPrivateIPConfig{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CloudPrivateIPConfig", "CloudPrivateIPConfig", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&CloudPrivateIPConfigList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *CloudPrivateIPConfigSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CloudPrivateIPConfigSpec{`, + `Node:` + fmt.Sprintf("%v", this.Node) + `,`, + `}`, + }, "") + return s +} +func (this *CloudPrivateIPConfigStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]Condition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&CloudPrivateIPConfigStatus{`, + `Node:` + fmt.Sprintf("%v", this.Node) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CloudPrivateIPConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CloudPrivateIPConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CloudPrivateIPConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CloudPrivateIPConfigList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CloudPrivateIPConfigList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CloudPrivateIPConfigList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CloudPrivateIPConfig{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CloudPrivateIPConfigSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CloudPrivateIPConfigSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CloudPrivateIPConfigSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Node = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CloudPrivateIPConfigStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CloudPrivateIPConfigStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CloudPrivateIPConfigStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Node = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, v1.Condition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/openshift/api/cloudnetwork/v1/generated.proto b/vendor/github.com/openshift/api/cloudnetwork/v1/generated.proto new file mode 100644 index 000000000..6c3688af6 --- /dev/null +++ b/vendor/github.com/openshift/api/cloudnetwork/v1/generated.proto @@ -0,0 +1,87 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package github.com.openshift.api.cloudnetwork.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/openshift/api/cloudnetwork/v1"; + +// CloudPrivateIPConfig performs an assignment of a private IP address to the +// primary NIC associated with cloud VMs. 
This is done by specifying the IP and +// Kubernetes node which the IP should be assigned to. This CRD is intended to +// be used by the network plugin which manages the cluster network. The spec +// side represents the desired state requested by the network plugin, and the +// status side represents the current state that this CRD's controller has +// executed. No users will have permission to modify it, and if a cluster-admin +// decides to edit it for some reason, their changes will be overwritten the +// next time the network plugin reconciles the object. Note: the CR's name +// must specify the requested private IP address (can be IPv4 or IPv6). +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=true +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:path=cloudprivateipconfigs,scope=Cluster +// +openshift:compatibility-gen:level=1 +message CloudPrivateIPConfig { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the definition of the desired private IP request. + // +kubebuilder:validation:Required + // +required + optional CloudPrivateIPConfigSpec spec = 2; + + // status is the observed status of the desired private IP request. Read-only. + // +kubebuilder:validation:Optional + // +optional + optional CloudPrivateIPConfigStatus status = 3; +} + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +resource:path=cloudprivateipconfig +// CloudPrivateIPConfigList is the list of CloudPrivateIPConfig. +// +openshift:compatibility-gen:level=1 +message CloudPrivateIPConfigList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of CloudPrivateIPConfig. + repeated CloudPrivateIPConfig items = 2; +} + +// CloudPrivateIPConfigSpec consists of a node name which the private IP should be assigned to. +// +k8s:openapi-gen=true +message CloudPrivateIPConfigSpec { + // node is the node name, as specified by the Kubernetes field: node.metadata.name + // +kubebuilder:validation:Optional + // +optional + optional string node = 1; +} + +// CloudPrivateIPConfigStatus specifies the node assignment together with its assignment condition.
+// +k8s:openapi-gen=true +message CloudPrivateIPConfigStatus { + // node is the node name, as specified by the Kubernetes field: node.metadata.name + // +kubebuilder:validation:Optional + // +optional + optional string node = 1; + + // condition is the assignment condition of the private IP and its status + // +kubebuilder:validation:Required + // +required + repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 2; +} + diff --git a/vendor/github.com/openshift/api/cloudnetwork/v1/register.go b/vendor/github.com/openshift/api/cloudnetwork/v1/register.go new file mode 100644 index 000000000..734101c8e --- /dev/null +++ b/vendor/github.com/openshift/api/cloudnetwork/v1/register.go @@ -0,0 +1,37 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "cloud.network.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = SchemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = SchemeBuilder.AddToScheme +) + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &CloudPrivateIPConfig{}, + &CloudPrivateIPConfigList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/cloudnetwork/v1/stable.cloudprivateipconfig.testsuite.yaml b/vendor/github.com/openshift/api/cloudnetwork/v1/stable.cloudprivateipconfig.testsuite.yaml new file mode 100644 index 000000000..9a65ba885 --- /dev/null +++ b/vendor/github.com/openshift/api/cloudnetwork/v1/stable.cloudprivateipconfig.testsuite.yaml @@ -0,0 +1,18 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] Cloud Network" +crd: 001-cloudprivateipconfig.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal CloudPrivateIPConfig + initial: | + apiVersion: cloud.network.openshift.io/v1 + kind: CloudPrivateIPConfig + metadata: + name: 1.2.3.4 + spec: {} # No spec is required for a CloudPrivateIPConfig + expected: | + apiVersion: cloud.network.openshift.io/v1 + kind: CloudPrivateIPConfig + metadata: + name: 1.2.3.4 + spec: {} diff --git a/vendor/github.com/openshift/api/cloudnetwork/v1/types.go b/vendor/github.com/openshift/api/cloudnetwork/v1/types.go new file mode 100644 index 000000000..78dcae092 --- /dev/null +++ b/vendor/github.com/openshift/api/cloudnetwork/v1/types.go @@ -0,0 +1,91 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// CloudPrivateIPConfig performs an assignment of a private IP address to the +// primary NIC associated with cloud VMs. This is done by specifying the IP and +// Kubernetes node which the IP should be assigned to. This CRD is intended to +// be used by the network plugin which manages the cluster network. 
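The register.go file above wires the cloudnetwork/v1 types into a runtime.Scheme via Install. A hedged usage sketch follows; the package alias and surrounding wiring are assumptions of this sketch, and only the vendored API itself comes from the diff:

package main

import (
	"fmt"

	cloudnetworkv1 "github.com/openshift/api/cloudnetwork/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// Register CloudPrivateIPConfig and CloudPrivateIPConfigList.
	if err := cloudnetworkv1.Install(scheme); err != nil {
		panic(err)
	}

	// The CR's name carries the requested private IP (IPv4 or IPv6).
	cfg := &cloudnetworkv1.CloudPrivateIPConfig{
		ObjectMeta: metav1.ObjectMeta{Name: "192.0.2.10"},
		Spec:       cloudnetworkv1.CloudPrivateIPConfigSpec{Node: "worker-0"}, // hypothetical node name
	}

	// The scheme can now resolve the object's group/version/kind.
	gvks, _, err := scheme.ObjectKinds(cfg)
	fmt.Println(gvks, err) // [cloud.network.openshift.io/v1, Kind=CloudPrivateIPConfig] <nil>
}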
The spec +// side represents the desired state requested by the network plugin, and the +// status side represents the current state that this CRD's controller has +// executed. No users will have permission to modify it, and if a cluster-admin +// decides to edit it for some reason, their changes will be overwritten the +// next time the network plugin reconciles the object. Note: the CR's name +// must specify the requested private IP address (can be IPv4 or IPv6). +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:openapi-gen=true +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:resource:path=cloudprivateipconfigs,scope=Cluster +// +openshift:compatibility-gen:level=1 +type CloudPrivateIPConfig struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // spec is the definition of the desired private IP request. + // +kubebuilder:validation:Required + // +required + Spec CloudPrivateIPConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + // status is the observed status of the desired private IP request. Read-only. + // +kubebuilder:validation:Optional + // +optional + Status CloudPrivateIPConfigStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// CloudPrivateIPConfigSpec consists of a node name which the private IP should be assigned to. +// +k8s:openapi-gen=true +type CloudPrivateIPConfigSpec struct { + // node is the node name, as specified by the Kubernetes field: node.metadata.name + // +kubebuilder:validation:Optional + // +optional + Node string `json:"node" protobuf:"bytes,1,opt,name=node"` +} + +// CloudPrivateIPConfigStatus specifies the node assignment together with its assignment condition. +// +k8s:openapi-gen=true +type CloudPrivateIPConfigStatus struct { + // node is the node name, as specified by the Kubernetes field: node.metadata.name + // +kubebuilder:validation:Optional + // +optional + Node string `json:"node" protobuf:"bytes,1,opt,name=node"` + // conditions is the assignment condition of the private IP and its status + // +kubebuilder:validation:Required + // +required + Conditions []metav1.Condition `json:"conditions" protobuf:"bytes,2,rep,name=conditions"` +} + +// CloudPrivateIPConfigConditionType specifies the current condition type of the CloudPrivateIPConfig +type CloudPrivateIPConfigConditionType string + +const ( + // Assigned is the condition type of the cloud private IP request. + // It is paired with the following ConditionStatus: + // - True - in the case of a successful assignment + // - False - in the case of a failed assignment + // - Unknown - in the case of a pending assignment + Assigned CloudPrivateIPConfigConditionType = "Assigned" +) + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +resource:path=cloudprivateipconfig +// CloudPrivateIPConfigList is the list of CloudPrivateIPConfig objects.
+// +openshift:compatibility-gen:level=1 +type CloudPrivateIPConfigList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of CloudPrivateIPConfig. + Items []CloudPrivateIPConfig `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/github.com/openshift/api/cloudnetwork/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/cloudnetwork/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..092825f35 --- /dev/null +++ b/vendor/github.com/openshift/api/cloudnetwork/v1/zz_generated.deepcopy.go @@ -0,0 +1,111 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudPrivateIPConfig) DeepCopyInto(out *CloudPrivateIPConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudPrivateIPConfig. +func (in *CloudPrivateIPConfig) DeepCopy() *CloudPrivateIPConfig { + if in == nil { + return nil + } + out := new(CloudPrivateIPConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudPrivateIPConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudPrivateIPConfigList) DeepCopyInto(out *CloudPrivateIPConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CloudPrivateIPConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudPrivateIPConfigList. +func (in *CloudPrivateIPConfigList) DeepCopy() *CloudPrivateIPConfigList { + if in == nil { + return nil + } + out := new(CloudPrivateIPConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CloudPrivateIPConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudPrivateIPConfigSpec) DeepCopyInto(out *CloudPrivateIPConfigSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudPrivateIPConfigSpec. 
+func (in *CloudPrivateIPConfigSpec) DeepCopy() *CloudPrivateIPConfigSpec { + if in == nil { + return nil + } + out := new(CloudPrivateIPConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudPrivateIPConfigStatus) DeepCopyInto(out *CloudPrivateIPConfigStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudPrivateIPConfigStatus. +func (in *CloudPrivateIPConfigStatus) DeepCopy() *CloudPrivateIPConfigStatus { + if in == nil { + return nil + } + out := new(CloudPrivateIPConfigStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/cloudnetwork/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/cloudnetwork/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..6a2f659ca --- /dev/null +++ b/vendor/github.com/openshift/api/cloudnetwork/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,54 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_CloudPrivateIPConfig = map[string]string{ + "": "CloudPrivateIPConfig performs an assignment of a private IP address to the primary NIC associated with cloud VMs. This is done by specifying the IP and Kubernetes node which the IP should be assigned to. This CRD is intended to be used by the network plugin which manages the cluster network. The spec side represents the desired state requested by the network plugin, and the status side represents the current state that this CRD's controller has executed. No users will have permission to modify it, and if a cluster-admin decides to edit it for some reason, their changes will be overwritten the next time the network plugin reconciles the object. Note: the CR's name must specify the requested private IP address (can be IPv4 or IPv6).\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the definition of the desired private IP request.", + "status": "status is the observed status of the desired private IP request. Read-only.", +} + +func (CloudPrivateIPConfig) SwaggerDoc() map[string]string { + return map_CloudPrivateIPConfig +} + +var map_CloudPrivateIPConfigList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). CloudPrivateIPConfigList is the list of CloudPrivateIPConfig objects.", + "metadata": "metadata is the standard list's metadata.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "List of CloudPrivateIPConfig.", +} + +func (CloudPrivateIPConfigList) SwaggerDoc() map[string]string { + return map_CloudPrivateIPConfigList +} + +var map_CloudPrivateIPConfigSpec = map[string]string{ + "": "CloudPrivateIPConfigSpec consists of a node name which the private IP should be assigned to.", + "node": "node is the node name, as specified by the Kubernetes field: node.metadata.name", +} + +func (CloudPrivateIPConfigSpec) SwaggerDoc() map[string]string { + return map_CloudPrivateIPConfigSpec +} + +var map_CloudPrivateIPConfigStatus = map[string]string{ + "": "CloudPrivateIPConfigStatus specifies the node assignment together with its assignment condition.", + "node": "node is the node name, as specified by the Kubernetes field: node.metadata.name", + "conditions": "conditions is the assignment condition of the private IP and its status", +} + +func (CloudPrivateIPConfigStatus) SwaggerDoc() map[string]string { + return map_CloudPrivateIPConfigStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/config/.codegen.yaml b/vendor/github.com/openshift/api/config/.codegen.yaml new file mode 100644 index 000000000..e799336fe --- /dev/null +++ b/vendor/github.com/openshift/api/config/.codegen.yaml @@ -0,0 +1,8 @@ +schemapatch: + requiredFeatureSets: + - "" + - "Default" + - "TechPreviewNoUpgrade" + - "CustomNoUpgrade" +swaggerdocs: + commentPolicy: Warn diff --git a/vendor/github.com/openshift/api/config/install.go b/vendor/github.com/openshift/api/config/install.go new file mode 100644 index 000000000..1c3c67747 --- /dev/null +++ b/vendor/github.com/openshift/api/config/install.go @@ -0,0 +1,27 @@ +package config + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + configv1 "github.com/openshift/api/config/v1" + configv1alpha1 "github.com/openshift/api/config/v1alpha1" +) + +const ( + GroupName = "config.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(configv1.Install, configv1alpha1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml index de04ec4d9..69a2ed280 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_00_cluster-version-operator_01_clusterversion.crd.yaml @@ -70,6 +70,10 @@ spec: - Insights - Storage - CSISnapshot + - NodeTuning + - MachineAPI + - Build + - DeploymentConfig x-kubernetes-list-type: atomic baselineCapabilitySet: description: baselineCapabilitySet selects an initial set of optional capabilities to enable, which can be extended via additionalEnabledCapabilities. If unset, the cluster will choose a default, and the default may change over time. The current default is vCurrent.
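The clusterversion hunks in this file add NodeTuning, MachineAPI, Build and DeploymentConfig to the capability enums and extend baselineCapabilitySet with newer baselines. A hedged sketch of opting in through the Go API; the constant names are taken from the vendored config/v1 package and the object wiring is illustrative, not part of this diff:

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
)

func main() {
	spec := configv1.ClusterVersionSpec{
		Capabilities: &configv1.ClusterVersionCapabilitiesSpec{
			// Start from the v4.13 baseline and additionally enable two of
			// the capabilities added to the enum in this diff.
			BaselineCapabilitySet: configv1.ClusterVersionCapabilitySet4_13,
			AdditionalEnabledCapabilities: []configv1.ClusterVersionCapability{
				configv1.ClusterVersionCapabilityNodeTuning,
				configv1.ClusterVersionCapabilityBuild,
			},
		},
	}
	fmt.Println(spec.Capabilities.BaselineCapabilitySet) // v4.13
}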
@@ -78,6 +82,8 @@ spec: - None - v4.11 - v4.12 + - v4.13 + - v4.14 - vCurrent channel: description: channel is an identifier for explicitly requesting that a non-default set of updates be applied to this cluster. The default channel will contain stable updates that are appropriate for production clusters. @@ -189,6 +195,10 @@ spec: - Insights - Storage - CSISnapshot + - NodeTuning + - MachineAPI + - Build + - DeploymentConfig x-kubernetes-list-type: atomic knownCapabilities: description: knownCapabilities lists all the capabilities known to the current cluster. @@ -204,6 +214,10 @@ spec: - Insights - Storage - CSISnapshot + - NodeTuning + - MachineAPI + - Build + - DeploymentConfig x-kubernetes-list-type: atomic conditionalUpdates: description: conditionalUpdates contains the list of updates that may be recommended for this cluster if it meets specific required conditions. Consumers interested in the set of updates that are actually recommended for this cluster should use availableUpdates. This list may be empty if no updates are recommended, if the update service is unavailable, or if an empty or invalid channel has been specified. diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-CustomNoUpgrade.crd.yaml new file mode 100644 index 000000000..1895f9d33 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-CustomNoUpgrade.crd.yaml @@ -0,0 +1,179 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: CustomNoUpgrade + name: apiservers.config.openshift.io +spec: + group: config.openshift.io + names: + kind: APIServer + listKind: APIServerList + plural: apiservers + singular: apiserver + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "APIServer holds configuration (like serving certificates, client CA and CORS domains) shared by all API servers in the system, among them especially kube-apiserver and openshift-apiserver. The canonical name of an instance is 'cluster'. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + additionalCORSAllowedOrigins: + description: additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth server from JavaScript applications. The values are regular expressions that correspond to the Golang regular expression language. + type: array + items: + type: string + audit: + description: audit specifies the settings for audit configuration to be applied to all OpenShift-provided API servers in the cluster. + type: object + default: + profile: Default + properties: + customRules: + description: customRules specify profiles per group. These profiles take precedence over the top-level profile field if they apply. They are evaluated from top to bottom and the first one that matches applies. + type: array + items: + description: AuditCustomRule describes a custom rule for an audit profile that takes precedence over the top-level profile. + type: object + required: + - group + - profile + properties: + group: + description: group is the name of a group that a request user must be a member of for this profile to apply. + type: string + minLength: 1 + profile: + description: "profile specifies the name of the desired audit policy configuration to be deployed to all OpenShift-provided API servers in the cluster. \n The following profiles are provided: - Default: the existing default policy. - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens. \n If unset, the 'Default' profile is used as the default." + type: string + enum: + - Default + - WriteRequestBodies + - AllRequestBodies + - None + x-kubernetes-list-map-keys: + - group + x-kubernetes-list-type: map + profile: + description: "profile specifies the name of the desired top-level audit profile to be applied to all requests sent to any of the OpenShift-provided API servers in the cluster (kube-apiserver, openshift-apiserver and oauth-apiserver), with the exception of those requests that match one or more of the customRules. \n The following profiles are provided: - Default: default policy which means MetaData level logging with the exception of events (not logged at all), oauthaccesstokens and oauthauthorizetokens (both logged at RequestBody level). - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens. \n Warning: It is not recommended to disable audit logging by using the `None` profile unless you are fully aware of the risks of not logging data that can be beneficial when troubleshooting issues. If you disable audit logging and a support situation arises, you might need to enable audit logging and reproduce the issue in order to troubleshoot properly.
\n If unset, the 'Default' profile is used as the default." + type: string + default: Default + enum: + - Default + - WriteRequestBodies + - AllRequestBodies + - None + clientCA: + description: 'clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from. The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data["ca-bundle.crt"] - CA bundle.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + encryption: + description: encryption allows the configuration of encryption of resources at the datastore layer. + type: object + properties: + type: + description: "type defines what encryption type should be used to encrypt resources at the datastore layer. When this field is unset (i.e. when it is set to the empty string), identity is implied. The behavior of unset can and will change over time. Even if encryption is enabled by default, the meaning of unset may change to a different encryption type based on changes in best practices. \n When encryption is enabled, all sensitive resources shipped with the platform are encrypted. This list of sensitive resources can and will change over time. The current authoritative list is: \n 1. secrets 2. configmaps 3. routes.route.openshift.io 4. oauthaccesstokens.oauth.openshift.io 5. oauthauthorizetokens.oauth.openshift.io" + type: string + enum: + - "" + - identity + - aescbc + - aesgcm + servingCerts: + description: servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates will be used for serving secure traffic. + type: object + properties: + namedCertificates: + description: namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames. If no named certificates are provided, or no named certificates match the server name as understood by a client, the defaultServingCertificate will be used. + type: array + items: + description: APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate. + type: object + properties: + names: + description: names is an optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates. Exact names trump over wildcard names. Explicit names defined here trump over extracted implicit names. + type: array + items: + type: string + servingCertificate: + description: 'servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic. The secret must exist in the openshift-config namespace and contain the following required fields: - Secret.Data["tls.key"] - TLS private key. - Secret.Data["tls.crt"] - TLS certificate.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + tlsSecurityProfile: + description: "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. \n If unset, a default (which may change between releases) is chosen.
Note that only Old, Intermediate and Custom profiles are currently supported, and the maximum available MinTLSVersions is VersionTLS12." + type: object + properties: + custom: + description: "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. An example custom profile looks like this: \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 minTLSVersion: TLSv1.1" + type: object + properties: + ciphers: + description: "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA" + type: array + items: + type: string + minTLSVersion: + description: "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): \n minTLSVersion: TLSv1.1 \n NOTE: currently the highest minTLSVersion allowed is VersionTLS12" + type: string + enum: + - VersionTLS10 + - VersionTLS11 + - VersionTLS12 + - VersionTLS13 + nullable: true + intermediate: + description: "intermediate is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 minTLSVersion: TLSv1.2" + type: object + nullable: true + modern: + description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 minTLSVersion: TLSv1.3 \n NOTE: Currently unsupported." + type: object + nullable: true + old: + description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256 - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256 - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA minTLSVersion: TLSv1.0" + type: object + nullable: true + type: + description: "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. 
Old, Intermediate and Modern are TLS security profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations \n The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be reduced. \n Note that the Modern profile is currently not supported because it is not yet well adopted by common software libraries." + type: string + enum: + - Old + - Intermediate + - Modern + - Custom + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-Default.crd.yaml similarity index 99% rename from vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml rename to vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-Default.crd.yaml index 3e53b28b9..7edc7f23a 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-Default.crd.yaml @@ -6,6 +6,7 @@ metadata: include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: Default name: apiservers.config.openshift.io spec: group: config.openshift.io @@ -101,6 +102,7 @@ spec: - "" - identity - aescbc + - aesgcm servingCerts: description: servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates will be used for serving secure traffic. type: object diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-TechPreviewNoUpgrade.crd.yaml new file mode 100644 index 000000000..8ce5214c1 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_apiserver-TechPreviewNoUpgrade.crd.yaml @@ -0,0 +1,179 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: TechPreviewNoUpgrade + name: apiservers.config.openshift.io +spec: + group: config.openshift.io + names: + kind: APIServer + listKind: APIServerList + plural: apiservers + singular: apiserver + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "APIServer holds configuration (like serving certificates, client CA and CORS domains) shared by all API servers in the system, among them especially kube-apiserver and openshift-apiserver. The canonical name of an instance is 'cluster'. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." 
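Both feature-set variants of this CRD validate the same audit and tlsSecurityProfile settings described above. A hedged sketch of setting them through the vendored Go types; type and constant names are assumed from the openshift/api config/v1 package, and the object construction is illustrative rather than part of this diff:

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	apiserver := &configv1.APIServer{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, // canonical singleton name
		Spec: configv1.APIServerSpec{
			// Top-level audit profile, one of the enum values above.
			Audit: configv1.Audit{Profile: configv1.WriteRequestBodiesAuditProfileType},
			// aesgcm is the encryption type newly allowed by this diff
			// (constant name assumed from the vendored package).
			Encryption: configv1.APIServerEncryption{Type: configv1.EncryptionTypeAESGCM},
			// A Custom TLS profile; the schema caps the minimum at TLS 1.2.
			TLSSecurityProfile: &configv1.TLSSecurityProfile{
				Type: configv1.TLSProfileCustomType,
				Custom: &configv1.CustomTLSProfile{
					TLSProfileSpec: configv1.TLSProfileSpec{
						Ciphers:       []string{"ECDHE-ECDSA-AES128-GCM-SHA256"},
						MinTLSVersion: configv1.VersionTLS12,
					},
				},
			},
		},
	}
	fmt.Println(apiserver.Spec.Audit.Profile, apiserver.Spec.TLSSecurityProfile.Type)
}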
+ type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + additionalCORSAllowedOrigins: + description: additionalCORSAllowedOrigins lists additional, user-defined regular expressions describing hosts for which the API server allows access using the CORS headers. This may be needed to access the API and the integrated OAuth server from JavaScript applications. The values are regular expressions that correspond to the Golang regular expression language. + type: array + items: + type: string + audit: + description: audit specifies the settings for audit configuration to be applied to all OpenShift-provided API servers in the cluster. + type: object + default: + profile: Default + properties: + customRules: + description: customRules specify profiles per group. These profiles take precedence over the top-level profile field if they apply. They are evaluated from top to bottom and the first one that matches applies. + type: array + items: + description: AuditCustomRule describes a custom rule for an audit profile that takes precedence over the top-level profile. + type: object + required: + - group + - profile + properties: + group: + description: group is the name of a group that a request user must be a member of for this profile to apply. + type: string + minLength: 1 + profile: + description: "profile specifies the name of the desired audit policy configuration to be deployed to all OpenShift-provided API servers in the cluster. \n The following profiles are provided: - Default: the existing default policy. - WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens. \n If unset, the 'Default' profile is used as the default." + type: string + enum: + - Default + - WriteRequestBodies + - AllRequestBodies + - None + x-kubernetes-list-map-keys: + - group + x-kubernetes-list-type: map + profile: + description: "profile specifies the name of the desired top-level audit profile to be applied to all requests sent to any of the OpenShift-provided API servers in the cluster (kube-apiserver, openshift-apiserver and oauth-apiserver), with the exception of those requests that match one or more of the customRules. \n The following profiles are provided: - Default: default policy which means MetaData level logging with the exception of events (not logged at all), oauthaccesstokens and oauthauthorizetokens (both logged at RequestBody level).
- WriteRequestBodies: like 'Default', but logs request and response HTTP payloads for write requests (create, update, patch). - AllRequestBodies: like 'WriteRequestBodies', but also logs request and response HTTP payloads for read requests (get, list). - None: no requests are logged at all, not even oauthaccesstokens and oauthauthorizetokens. \n Warning: It is not recommended to disable audit logging by using the `None` profile unless you are fully aware of the risks of not logging data that can be beneficial when troubleshooting issues. If you disable audit logging and a support situation arises, you might need to enable audit logging and reproduce the issue in order to troubleshoot properly. \n If unset, the 'Default' profile is used as the default." + type: string + default: Default + enum: + - Default + - WriteRequestBodies + - AllRequestBodies + - None + clientCA: + description: 'clientCA references a ConfigMap containing a certificate bundle for the signers that will be recognized for incoming client certificates in addition to the operator managed signers. If this is empty, then only operator managed signers are valid. You usually only have to set this if you have your own PKI you wish to honor client certificates from. The ConfigMap must exist in the openshift-config namespace and contain the following required fields: - ConfigMap.Data["ca-bundle.crt"] - CA bundle.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + encryption: + description: encryption allows the configuration of encryption of resources at the datastore layer. + type: object + properties: + type: + description: "type defines what encryption type should be used to encrypt resources at the datastore layer. When this field is unset (i.e. when it is set to the empty string), identity is implied. The behavior of unset can and will change over time. Even if encryption is enabled by default, the meaning of unset may change to a different encryption type based on changes in best practices. \n When encryption is enabled, all sensitive resources shipped with the platform are encrypted. This list of sensitive resources can and will change over time. The current authoritative list is: \n 1. secrets 2. configmaps 3. routes.route.openshift.io 4. oauthaccesstokens.oauth.openshift.io 5. oauthauthorizetokens.oauth.openshift.io" + type: string + enum: + - "" + - identity + - aescbc + - aesgcm + servingCerts: + description: servingCert is the TLS cert info for serving secure traffic. If not specified, operator managed certificates will be used for serving secure traffic. + type: object + properties: + namedCertificates: + description: namedCertificates references secrets containing the TLS cert info for serving secure traffic to specific hostnames. If no named certificates are provided, or no named certificates match the server name as understood by a client, the defaultServingCertificate will be used. + type: array + items: + description: APIServerNamedServingCert maps a server DNS name, as understood by a client, to a certificate. + type: object + properties: + names: + description: names is an optional list of explicit DNS names (leading wildcards allowed) that should use this certificate to serve secure traffic. If no names are provided, the implicit names will be extracted from the certificates. Exact names trump over wildcard names. Explicit names defined here trump over extracted implicit names.
+ type: array + items: + type: string + servingCertificate: + description: 'servingCertificate references a kubernetes.io/tls type secret containing the TLS cert info for serving secure traffic. The secret must exist in the openshift-config namespace and contain the following required fields: - Secret.Data["tls.key"] - TLS private key. - Secret.Data["tls.crt"] - TLS certificate.' + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + tlsSecurityProfile: + description: "tlsSecurityProfile specifies settings for TLS connections for externally exposed servers. \n If unset, a default (which may change between releases) is chosen. Note that only Old, Intermediate and Custom profiles are currently supported, and the maximum available MinTLSVersions is VersionTLS12." + type: object + properties: + custom: + description: "custom is a user-defined TLS security profile. Be extremely careful using a custom profile as invalid configurations can be catastrophic. An example custom profile looks like this: \n ciphers: - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 minTLSVersion: TLSv1.1" + type: object + properties: + ciphers: + description: "ciphers is used to specify the cipher algorithms that are negotiated during the TLS handshake. Operators may remove entries their operands do not support. For example, to use DES-CBC3-SHA (yaml): \n ciphers: - DES-CBC3-SHA" + type: array + items: + type: string + minTLSVersion: + description: "minTLSVersion is used to specify the minimal version of the TLS protocol that is negotiated during the TLS handshake. For example, to use TLS versions 1.1, 1.2 and 1.3 (yaml): \n minTLSVersion: TLSv1.1 \n NOTE: currently the highest minTLSVersion allowed is VersionTLS12" + type: string + enum: + - VersionTLS10 + - VersionTLS11 + - VersionTLS12 + - VersionTLS13 + nullable: true + intermediate: + description: "intermediate is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Intermediate_compatibility_.28recommended.29 \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 minTLSVersion: TLSv1.2" + type: object + nullable: true + modern: + description: "modern is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Modern_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 minTLSVersion: TLSv1.3 \n NOTE: Currently unsupported." 
+ type: object + nullable: true + old: + description: "old is a TLS security profile based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Old_backward_compatibility \n and looks like this (yaml): \n ciphers: - TLS_AES_128_GCM_SHA256 - TLS_AES_256_GCM_SHA384 - TLS_CHACHA20_POLY1305_SHA256 - ECDHE-ECDSA-AES128-GCM-SHA256 - ECDHE-RSA-AES128-GCM-SHA256 - ECDHE-ECDSA-AES256-GCM-SHA384 - ECDHE-RSA-AES256-GCM-SHA384 - ECDHE-ECDSA-CHACHA20-POLY1305 - ECDHE-RSA-CHACHA20-POLY1305 - DHE-RSA-AES128-GCM-SHA256 - DHE-RSA-AES256-GCM-SHA384 - DHE-RSA-CHACHA20-POLY1305 - ECDHE-ECDSA-AES128-SHA256 - ECDHE-RSA-AES128-SHA256 - ECDHE-ECDSA-AES128-SHA - ECDHE-RSA-AES128-SHA - ECDHE-ECDSA-AES256-SHA384 - ECDHE-RSA-AES256-SHA384 - ECDHE-ECDSA-AES256-SHA - ECDHE-RSA-AES256-SHA - DHE-RSA-AES128-SHA256 - DHE-RSA-AES256-SHA256 - AES128-GCM-SHA256 - AES256-GCM-SHA384 - AES128-SHA256 - AES256-SHA256 - AES128-SHA - AES256-SHA - DES-CBC3-SHA minTLSVersion: TLSv1.0" + type: object + nullable: true + type: + description: "type is one of Old, Intermediate, Modern or Custom. Custom provides the ability to specify individual TLS security profile parameters. Old, Intermediate and Modern are TLS security profiles based on: \n https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations \n The profiles are intent based, so they may change over time as new ciphers are developed and existing ciphers are found to be insecure. Depending on precisely which ciphers are available to a process, the list may be reduced. \n Note that the Modern profile is currently not supported because it is not yet well adopted by common software libraries." + type: string + enum: + - Old + - Intermediate + - Modern + - Custom + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml index 89bc65581..75166deb7 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_build.crd.yaml @@ -204,6 +204,21 @@ spec: description: Resources defines resource requirements to execute the build. type: object properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." + type: array + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + type: object + required: + - name + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map limits: description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object @@ -214,7 +229,7 @@ spec: - type: string x-kubernetes-int-or-string: true requests: - description: 'Requests describes the minimum amount of compute resources required. 
If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' type: object additionalProperties: pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-CustomNoUpgrade.crd.yaml new file mode 100644 index 000000000..9da62cbfe --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-CustomNoUpgrade.crd.yaml @@ -0,0 +1,114 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: CustomNoUpgrade + name: dnses.config.openshift.io +spec: + group: config.openshift.io + names: + kind: DNS + listKind: DNSList + plural: dnses + singular: dns + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "DNS holds cluster-wide information about DNS. The canonical name is `cluster` \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + baseDomain: + description: "baseDomain is the base domain of the cluster. All managed DNS records will be sub-domains of this base. \n For example, given the base domain `openshift.example.com`, an API server DNS record may be created for `cluster-api.openshift.example.com`. \n Once set, this field cannot be changed." + type: string + platform: + description: platform holds configuration specific to the underlying infrastructure provider for DNS. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. 
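The platform stanza these DNS CRDs introduce corresponds to new fields on the Go side. A hedged sketch follows; the field and constant names are assumed from the vendored config/v1 package, and the ARN and zone ID are placeholders:

package main

import (
	"fmt"

	configv1 "github.com/openshift/api/config/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	dns := &configv1.DNS{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"}, // canonical singleton name
		Spec: configv1.DNSSpec{
			BaseDomain: "openshift.example.com",
			Platform: configv1.DNSPlatformSpec{
				Type: configv1.AWSPlatformType, // the schema currently allows "" or "AWS"
				AWS: &configv1.AWSDNSSpec{
					// Placeholder ARN; must match the pattern in the schema above.
					PrivateZoneIAMRole: "arn:aws:iam::123456789012:role/private-zone",
				},
			},
			PrivateZone: &configv1.DNSZone{ID: "Z123EXAMPLE"}, // placeholder hosted zone ID
		},
	}
	fmt.Println(dns.Spec.Platform.Type)
}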
+ type: object + required: + - type + properties: + aws: + description: aws contains DNS configuration specific to the Amazon Web Services cloud provider. + type: object + properties: + privateZoneIAMRole: + description: privateZoneIAMRole contains the ARN of an IAM role that should be assumed when performing operations on the cluster's private hosted zone specified in the cluster DNS config. When left empty, no role should be assumed. + type: string + pattern: ^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\/.*$ + type: + description: "type is the underlying infrastructure provider for the cluster. Allowed values: \"\", \"AWS\". \n Individual components may not support all platforms, and must handle unrecognized platforms with best-effort defaults." + type: string + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + x-kubernetes-validations: + - rule: self in ['','AWS'] + message: allowed values are '' and 'AWS' + x-kubernetes-validations: + - rule: 'has(self.type) && self.type == ''AWS'' ? has(self.aws) : !has(self.aws)' + message: aws configuration is required when platform is AWS, and forbidden otherwise + privateZone: + description: "privateZone is the location where all the DNS records that are only available internally to the cluster exist. \n If this field is nil, no private records should be created. \n Once set, this field cannot be changed." + type: object + properties: + id: + description: "id is the identifier that can be used to find the DNS hosted zone. \n on AWS zone can be fetched using `ID` as id in [1] on Azure zone can be fetched using `ID` as a pre-determined name in [2], on GCP zone can be fetched using `ID` as a pre-determined name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" + type: string + tags: + description: "tags can be used to query the DNS hosted zone. \n on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" + type: object + additionalProperties: + type: string + publicZone: + description: "publicZone is the location where all the DNS records that are publicly accessible to the internet exist. \n If this field is nil, no public records should be created. \n Once set, this field cannot be changed." + type: object + properties: + id: + description: "id is the identifier that can be used to find the DNS hosted zone. \n on AWS zone can be fetched using `ID` as id in [1] on Azure zone can be fetched using `ID` as a pre-determined name in [2], on GCP zone can be fetched using `ID` as a pre-determined name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" + type: string + tags: + description: "tags can be used to query the DNS hosted zone. 
\n on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" + type: object + additionalProperties: + type: string + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-Default.crd.yaml similarity index 68% rename from vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml rename to vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-Default.crd.yaml index e4fa56eee..62080e10e 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-Default.crd.yaml @@ -6,6 +6,7 @@ metadata: include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: Default name: dnses.config.openshift.io spec: group: config.openshift.io @@ -39,6 +40,47 @@ spec: baseDomain: description: "baseDomain is the base domain of the cluster. All managed DNS records will be sub-domains of this base. \n For example, given the base domain `openshift.example.com`, an API server DNS record may be created for `cluster-api.openshift.example.com`. \n Once set, this field cannot be changed." type: string + platform: + description: platform holds configuration specific to the underlying infrastructure provider for DNS. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. + type: object + required: + - type + properties: + aws: + description: aws contains DNS configuration specific to the Amazon Web Services cloud provider. + type: object + properties: + privateZoneIAMRole: + description: privateZoneIAMRole contains the ARN of an IAM role that should be assumed when performing operations on the cluster's private hosted zone specified in the cluster DNS config. When left empty, no role should be assumed. + type: string + pattern: ^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\/.*$ + type: + description: "type is the underlying infrastructure provider for the cluster. Allowed values: \"\", \"AWS\". \n Individual components may not support all platforms, and must handle unrecognized platforms with best-effort defaults." + type: string + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + x-kubernetes-validations: + - rule: self in ['','AWS'] + message: allowed values are '' and 'AWS' + x-kubernetes-validations: + - rule: 'has(self.type) && self.type == ''AWS'' ? has(self.aws) : !has(self.aws)' + message: aws configuration is required when platform is AWS, and forbidden otherwise privateZone: description: "privateZone is the location where all the DNS records that are only available internally to the cluster exist. \n If this field is nil, no private records should be created. \n Once set, this field cannot be changed." 
type: object diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-TechPreviewNoUpgrade.crd.yaml new file mode 100644 index 000000000..043b6fc60 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_dns-TechPreviewNoUpgrade.crd.yaml @@ -0,0 +1,114 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: TechPreviewNoUpgrade + name: dnses.config.openshift.io +spec: + group: config.openshift.io + names: + kind: DNS + listKind: DNSList + plural: dnses + singular: dns + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "DNS holds cluster-wide information about DNS. The canonical name is `cluster` \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + baseDomain: + description: "baseDomain is the base domain of the cluster. All managed DNS records will be sub-domains of this base. \n For example, given the base domain `openshift.example.com`, an API server DNS record may be created for `cluster-api.openshift.example.com`. \n Once set, this field cannot be changed." + type: string + platform: + description: platform holds configuration specific to the underlying infrastructure provider for DNS. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. + type: object + required: + - type + properties: + aws: + description: aws contains DNS configuration specific to the Amazon Web Services cloud provider. + type: object + properties: + privateZoneIAMRole: + description: privateZoneIAMRole contains the ARN of an IAM role that should be assumed when performing operations on the cluster's private hosted zone specified in the cluster DNS config. When left empty, no role should be assumed. + type: string + pattern: ^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\/.*$ + type: + description: "type is the underlying infrastructure provider for the cluster. Allowed values: \"\", \"AWS\". \n Individual components may not support all platforms, and must handle unrecognized platforms with best-effort defaults." 
+ type: string + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + x-kubernetes-validations: + - rule: self in ['','AWS'] + message: allowed values are '' and 'AWS' + x-kubernetes-validations: + - rule: 'has(self.type) && self.type == ''AWS'' ? has(self.aws) : !has(self.aws)' + message: aws configuration is required when platform is AWS, and forbidden otherwise + privateZone: + description: "privateZone is the location where all the DNS records that are only available internally to the cluster exist. \n If this field is nil, no private records should be created. \n Once set, this field cannot be changed." + type: object + properties: + id: + description: "id is the identifier that can be used to find the DNS hosted zone. \n on AWS zone can be fetched using `ID` as id in [1] on Azure zone can be fetched using `ID` as a pre-determined name in [2], on GCP zone can be fetched using `ID` as a pre-determined name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" + type: string + tags: + description: "tags can be used to query the DNS hosted zone. \n on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" + type: object + additionalProperties: + type: string + publicZone: + description: "publicZone is the location where all the DNS records that are publicly accessible to the internet exist. \n If this field is nil, no public records should be created. \n Once set, this field cannot be changed." + type: object + properties: + id: + description: "id is the identifier that can be used to find the DNS hosted zone. \n on AWS zone can be fetched using `ID` as id in [1] on Azure zone can be fetched using `ID` as a pre-determined name in [2], on GCP zone can be fetched using `ID` as a pre-determined name in [3]. \n [1]: https://docs.aws.amazon.com/cli/latest/reference/route53/get-hosted-zone.html#options [2]: https://docs.microsoft.com/en-us/cli/azure/network/dns/zone?view=azure-cli-latest#az-network-dns-zone-show [3]: https://cloud.google.com/dns/docs/reference/v1/managedZones/get" + type: string + tags: + description: "tags can be used to query the DNS hosted zone. \n on AWS, resourcegroupstaggingapi [1] can be used to fetch a zone using `Tags` as tag-filters, \n [1]: https://docs.aws.amazon.com/cli/latest/reference/resourcegroupstaggingapi/get-resources.html#options" + type: object + additionalProperties: + type: string + status: + description: status holds observed values from the cluster. They may not be overridden. 
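For orientation, a cluster DNS object that would validate against the platform schema above might look like the following sketch; the base domain, role ARN, and hosted zone ID are invented examples, not values from this patch:

  apiVersion: config.openshift.io/v1
  kind: DNS
  metadata:
    name: cluster
  spec:
    baseDomain: mycluster.example.com
    platform:
      type: AWS
      aws:
        privateZoneIAMRole: arn:aws:iam::123456789012:role/example-private-zone-role
    privateZone:
      id: Z3URY6TWQ91KVV
    publicZone:
      tags:
        Name: mycluster-public

Note that the CEL validation above constrains spec.platform.type to '' or 'AWS' even though the enum lists all providers, and requires the aws stanza exactly when the type is AWS.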
+ type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml index 5254d0ce2..77e01b8a7 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_featuregate.crd.yaml @@ -44,12 +44,16 @@ spec: description: disabled is a list of all feature gates that you want to force off type: array items: + description: FeatureGateName is a string to enforce patterns on the name of a FeatureGate type: string + pattern: ^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$ enabled: description: enabled is a list of all feature gates that you want to force on type: array items: + description: FeatureGateName is a string to enforce patterns on the name of a FeatureGate type: string + pattern: ^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$ nullable: true featureSet: description: featureSet changes the list of features in the cluster. The default is empty. Be very careful adjusting this setting. Turning on or off features may cause irreversible changes in your cluster which cannot be undone. @@ -57,6 +61,92 @@ spec: status: description: status holds observed values from the cluster. They may not be overridden. type: object + properties: + conditions: + description: 'conditions represent the observations of the current state. Known .status.conditions.type are: "DeterminationDegraded"' + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. 
+ type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + featureGates: + description: featureGates contains a list of enabled and disabled featureGates that are keyed by payloadVersion. Operators other than the CVO and cluster-config-operator, must read the .status.featureGates, locate the version they are managing, find the enabled/disabled featuregates and make the operand and operator match. The enabled/disabled values for a particular version may change during the life of the cluster as various .spec.featureSet values are selected. Operators may choose to restart their processes to pick up these changes, but remembering past enable/disable lists is beyond the scope of this API and is the responsibility of individual operators. Only featureGates with .version in the ClusterVersion.status will be present in this list. + type: array + items: + type: object + required: + - version + properties: + disabled: + description: disabled is a list of all feature gates that are disabled in the cluster for the named version. + type: array + items: + type: object + required: + - name + properties: + name: + description: name is the name of the FeatureGate. + type: string + pattern: ^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$ + enabled: + description: enabled is a list of all feature gates that are enabled in the cluster for the named version. + type: array + items: + type: object + required: + - name + properties: + name: + description: name is the name of the FeatureGate. + type: string + pattern: ^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$ + version: + description: version matches the version provided by the ClusterVersion and in the ClusterOperator.Status.Versions field. 
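To illustrate the new status shape, a hypothetical FeatureGate status satisfying this schema could look as follows; the gate names and version are illustrative only, not taken from a real payload:

  status:
    conditions:
    - lastTransitionTime: "2023-10-24T00:00:00Z"
      message: FeatureGates were determined for all payload versions.
      reason: AsExpected
      status: "False"
      type: DeterminationDegraded
    featureGates:
    - version: 4.14.0
      enabled:
      - name: ExampleGate
      disabled:
      - name: AnotherExampleGate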
+ type: string + x-kubernetes-list-map-keys: + - version + x-kubernetes-list-type: map served: true storage: true subresources: diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml new file mode 100644 index 000000000..4c6d4c074 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml @@ -0,0 +1,999 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: CustomNoUpgrade + name: infrastructures.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Infrastructure + listKind: InfrastructureList + plural: infrastructures + singular: infrastructure + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster` \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + properties: + cloudConfig: + description: "cloudConfig is a reference to a ConfigMap containing the cloud provider configuration file. This configuration file is used to configure the Kubernetes cloud provider integration when using the built-in cloud provider integration or the external cloud controller manager. The namespace for this config map is openshift-config. \n cloudConfig should only be consumed by the kube_cloud_config controller. The controller is responsible for using the user configuration in the spec for various platforms and combining that with the user provided ConfigMap in this field to create a stitched kube cloud config. The controller generates a ConfigMap `kube-cloud-config` in the `openshift-config-managed` namespace, with the kube cloud config stored in the `cloud.conf` key. All the clients are expected to use the generated ConfigMap only." + properties: + key: + description: Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references. + type: string + name: + type: string + type: object + platformSpec: + description: platformSpec holds desired information specific to the underlying infrastructure provider.
+ properties: + alibabaCloud: + description: AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. + type: object + aws: + description: AWS contains settings specific to the Amazon Web Services infrastructure provider. + properties: + serviceEndpoints: + description: serviceEndpoints list contains custom endpoints which will override the default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service. + items: + description: AWSServiceEndpoint stores the configuration of a custom url to override existing defaults of AWS Services. + properties: + name: + description: name is the name of the AWS service. The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html This must be provided and cannot be empty. + pattern: ^[a-z0-9-]+$ + type: string + url: + description: url is a fully qualified URI with scheme https that overrides the default generated endpoint for a client. This must be provided and cannot be empty. + pattern: ^https:// + type: string + type: object + type: array + type: object + azure: + description: Azure contains settings specific to the Azure infrastructure provider. + type: object + baremetal: + description: BareMetal contains settings specific to the BareMetal platform. + type: object + equinixMetal: + description: EquinixMetal contains settings specific to the Equinix Metal infrastructure provider. + type: object + external: + description: ExternalPlatformType represents a generic infrastructure provider. Platform-specific components should be supplemented separately. + properties: + platformName: + default: Unknown + description: PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. This field is solely for informational and reporting purposes and is not expected to be used for decision-making. + type: string + x-kubernetes-validations: + - message: platform name cannot be changed once set + rule: oldSelf == 'Unknown' || self == oldSelf + type: object + gcp: + description: GCP contains settings specific to the Google Cloud Platform infrastructure provider. + type: object + ibmcloud: + description: IBMCloud contains settings specific to the IBMCloud infrastructure provider. + type: object + kubevirt: + description: Kubevirt contains settings specific to the kubevirt infrastructure provider. + type: object + nutanix: + description: Nutanix contains settings specific to the Nutanix infrastructure provider. + properties: + prismCentral: + description: prismCentral holds the endpoint address and port to access the Nutanix Prism Central. When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list. + properties: + address: + description: address is the endpoint address (DNS name or IP address) of the Nutanix Prism Central or Element (cluster) + maxLength: 256 + type: string + port: + description: port is the port number to access the Nutanix Prism Central or Element (cluster) + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - address + - port + type: object + prismElements: + description: prismElements holds one or more endpoint address and port data to access the Nutanix Prism Elements (clusters) of the Nutanix Prism Central.
Currently we only support one Prism Element (cluster) for an OpenShift cluster, where all the Nutanix resources (VMs, subnets, volumes, etc.) used in the OpenShift cluster are located. In the future, we may support Nutanix resources (VMs, etc.) spread over multiple Prism Elements (clusters) of the Prism Central. + items: + description: NutanixPrismElementEndpoint holds the name and endpoint data for a Prism Element (cluster) + properties: + endpoint: + description: endpoint holds the endpoint address and port data of the Prism Element (cluster). When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list. + properties: + address: + description: address is the endpoint address (DNS name or IP address) of the Nutanix Prism Central or Element (cluster) + maxLength: 256 + type: string + port: + description: port is the port number to access the Nutanix Prism Central or Element (cluster) + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - address + - port + type: object + name: + description: name is the name of the Prism Element (cluster). This value will correspond with the cluster field configured on other resources (eg Machines, PVCs, etc). + maxLength: 256 + type: string + required: + - endpoint + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + required: + - prismCentral + - prismElements + type: object + openstack: + description: OpenStack contains settings specific to the OpenStack infrastructure provider. + type: object + ovirt: + description: Ovirt contains settings specific to the oVirt infrastructure provider. + type: object + powervs: + description: PowerVS contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider. + properties: + serviceEndpoints: + description: serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service. + items: + description: PowervsServiceEndpoint stores the configuration of a custom url to override existing defaults of PowerVS Services. + properties: + name: + description: name is the name of the Power VS service. A few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud + pattern: ^[a-z0-9-]+$ + type: string + url: + description: url is a fully qualified URI with scheme https that overrides the default generated endpoint for a client. This must be provided and cannot be empty. + format: uri + pattern: ^https:// + type: string + required: + - name + - url + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + type: object + type: + description: type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", "AlibabaCloud", "Nutanix" and "None".
Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform. + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + type: string + vsphere: + description: VSphere contains settings specific to the VSphere infrastructure provider. + properties: + failureDomains: + description: failureDomains contains the definition of region, zone and the vCenter topology. If this is omitted failure domains (regions and zones) will not be used. + items: + description: VSpherePlatformFailureDomainSpec holds the region and zone failure domain and the vCenter topology of that failure domain. + properties: + name: + description: name defines the arbitrary but unique name of a failure domain. + maxLength: 256 + minLength: 1 + type: string + region: + description: region defines the name of a region tag that will be attached to a vCenter datacenter. The tag category in vCenter must be named openshift-region. + maxLength: 80 + minLength: 1 + type: string + server: + anyOf: + - format: ipv4 + - format: ipv6 + - format: hostname + description: server is the fully-qualified domain name or the IP address of the vCenter server. --- + maxLength: 255 + minLength: 1 + type: string + topology: + description: Topology describes a given failure domain using vSphere constructs + properties: + computeCluster: + description: computeCluster is the absolute path of the vCenter cluster in which the virtual machine will be located. The absolute path is of the form /<datacenter>/host/<cluster>. The maximum length of the path is 2048 characters. + maxLength: 2048 + pattern: ^/.*?/host/.*? + type: string + datacenter: + description: datacenter is the name of the vCenter datacenter in which virtual machines will be located. The maximum length of the datacenter name is 80 characters. + maxLength: 80 + type: string + datastore: + description: datastore is the absolute path of the datastore in which the virtual machine is located. The absolute path is of the form /<datacenter>/datastore/<datastore>. The maximum length of the path is 2048 characters. + maxLength: 2048 + pattern: ^/.*?/datastore/.*? + type: string + folder: + description: folder is the absolute path of the folder where virtual machines are located. The absolute path is of the form /<datacenter>/vm/<folder>. The maximum length of the path is 2048 characters. + maxLength: 2048 + pattern: ^/.*?/vm/.*? + type: string + networks: + description: networks is the list of port group network names within this failure domain. Currently, we only support a single interface per RHCOS virtual machine. The available networks (port groups) can be listed using `govc ls 'network/*'` The single interface should be the absolute path of the form /<datacenter>/network/<portgroup>. + items: + type: string + maxItems: 1 + minItems: 1 + type: array + resourcePool: + description: resourcePool is the absolute path of the resource pool where virtual machines will be created. The absolute path is of the form /<datacenter>/host/<cluster>/Resources/<resourcepool>. The maximum length of the path is 2048 characters. + maxLength: 2048 + pattern: ^/.*?/host/.*?/Resources.* + type: string + required: + - computeCluster + - datacenter + - datastore + - networks + type: object + zone: + description: zone defines the name of a zone tag that will be attached to a vCenter cluster. The tag category in vCenter must be named openshift-zone.
+ maxLength: 80 + minLength: 1 + type: string + required: + - name + - region + - server + - topology + - zone + type: object + type: array + nodeNetworking: + description: nodeNetworking contains the definition of internal and external network constraints for assigning the node's networking. If this field is omitted, networking defaults to the legacy address selection behavior which is to only support a single address and return the first one found. + properties: + external: + description: external represents the network configuration of the node that is externally routable. + properties: + excludeNetworkSubnetCidr: + description: excludeNetworkSubnetCidr IP addresses in subnet ranges will be excluded when selecting the IP address from the VirtualMachine's VM for use in the status.addresses fields. --- + items: + format: cidr + type: string + type: array + network: + description: network VirtualMachine's VM Network names that will be used when searching for status.addresses fields. Note that if internal.networkSubnetCIDR and external.networkSubnetCIDR are not set, then the vNIC associated to this network must only have a single IP address assigned to it. The available networks (port groups) can be listed using `govc ls 'network/*'` + type: string + networkSubnetCidr: + description: networkSubnetCidr IP address on VirtualMachine's network interfaces included in the fields' CIDRs that will be used in respective status.addresses fields. --- + items: + format: cidr + type: string + type: array + type: object + internal: + description: internal represents the network configuration of the node that is routable only within the cluster. + properties: + excludeNetworkSubnetCidr: + description: excludeNetworkSubnetCidr IP addresses in subnet ranges will be excluded when selecting the IP address from the VirtualMachine's VM for use in the status.addresses fields. --- + items: + format: cidr + type: string + type: array + network: + description: network VirtualMachine's VM Network names that will be used when searching for status.addresses fields. Note that if internal.networkSubnetCIDR and external.networkSubnetCIDR are not set, then the vNIC associated to this network must only have a single IP address assigned to it. The available networks (port groups) can be listed using `govc ls 'network/*'` + type: string + networkSubnetCidr: + description: networkSubnetCidr IP address on VirtualMachine's network interfaces included in the fields' CIDRs that will be used in respective status.addresses fields. --- + items: + format: cidr + type: string + type: array + type: object + type: object + vcenters: + description: vcenters holds the connection details for services to communicate with vCenter. Currently, only a single vCenter is supported. --- + items: + description: VSpherePlatformVCenterSpec stores the vCenter connection fields. This is used by the vSphere CCM. + properties: + datacenters: + description: The vCenter Datacenters in which the RHCOS vm guests are located. This field will be used by the Cloud Controller Manager. Each datacenter listed here should be used within a topology. + items: + type: string + minItems: 1 + type: array + port: + description: port is the TCP port that will be used to communicate to the vCenter endpoint. When omitted, this means the user has no opinion and it is up to the platform to choose a sensible default, which is subject to change over time.
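Pulling the vSphere fields above together, a sketch of a platformSpec with one failure domain and a single vCenter might look like this; every name, path, and address is a placeholder chosen to satisfy the documented patterns:

  platformSpec:
    type: VSphere
    vsphere:
      failureDomains:
      - name: us-east-1a
        region: us-east
        zone: us-east-1a
        server: vcenter.example.com
        topology:
          datacenter: dc1
          computeCluster: /dc1/host/cluster1
          datastore: /dc1/datastore/datastore1
          networks:
          - /dc1/network/port-group-1
      vcenters:
      - server: vcenter.example.com
        port: 443
        datacenters:
        - dc1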
+ format: int32 + maximum: 32767 + minimum: 1 + type: integer + server: + anyOf: + - format: ipv4 + - format: ipv6 + - format: hostname + description: server is the fully-qualified domain name or the IP address of the vCenter server. --- + maxLength: 255 + type: string + required: + - datacenters + - server + type: object + maxItems: 1 + minItems: 0 + type: array + type: object + type: object + type: object + status: + description: status holds observed values from the cluster. They may not be overridden. + properties: + apiServerInternalURI: + description: apiServerInternalURI is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerInternalURI can be used by components like kubelets to contact the Kubernetes API server using the infrastructure provider rather than Kubernetes networking. + type: string + apiServerURL: + description: apiServerURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerURL can be used by components like the web console to tell users where to find the Kubernetes API. + type: string + controlPlaneTopology: + default: HighlyAvailable + description: controlPlaneTopology expresses the expectations for operands that normally run on control nodes. The default is 'HighlyAvailable', which represents the behavior operators have in a "normal" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation. The 'External' mode indicates that the control plane is hosted externally to the cluster and that its components are not visible within the cluster. + enum: + - HighlyAvailable + - SingleReplica + - External + type: string + cpuPartitioning: + default: None + description: cpuPartitioning expresses if CPU partitioning is a currently enabled feature in the cluster. CPU Partitioning means that this cluster can support partitioning workloads to specific CPU Sets. Valid values are "None" and "AllNodes". When omitted, the default value is "None". The default value of "None" indicates that no nodes will be set up with CPU partitioning. The "AllNodes" value indicates that all nodes have been set up with CPU partitioning, and can then be further configured via the PerformanceProfile API. + enum: + - None + - AllNodes + type: string + etcdDiscoveryDomain: + description: 'etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery deprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release.' + type: string + infrastructureName: + description: infrastructureName uniquely identifies a cluster with a human friendly name. Once set it should not be changed. Must be of max length 27 and must have only alphanumeric or hyphen characters. + type: string + infrastructureTopology: + default: HighlyAvailable + description: 'infrastructureTopology expresses the expectations for infrastructure services that do not run on control plane nodes, usually indicated by a node selector for a `role` value other than `master`. The default is ''HighlyAvailable'', which represents the behavior operators have in a "normal" cluster.
The ''SingleReplica'' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation. NOTE: External topology mode is not applicable for this field.' + enum: + - HighlyAvailable + - SingleReplica + type: string + platform: + description: "platform is the underlying infrastructure provider for the cluster. \n Deprecated: Use platformStatus.type instead." + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + type: string + platformStatus: + description: platformStatus holds status information specific to the underlying infrastructure provider. + properties: + alibabaCloud: + description: AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. + properties: + region: + description: region specifies the region for Alibaba Cloud resources created for the cluster. + pattern: ^[0-9A-Za-z-]+$ + type: string + resourceGroupID: + description: resourceGroupID is the ID of the resource group for the cluster. + pattern: ^(rg-[0-9A-Za-z]+)?$ + type: string + resourceTags: + description: resourceTags is a list of additional tags to apply to Alibaba Cloud resources created for the cluster. + items: + description: AlibabaCloudResourceTag is the set of tags to apply to resources. + properties: + key: + description: key is the key of the tag. + maxLength: 128 + minLength: 1 + type: string + value: + description: value is the value of the tag. + maxLength: 128 + minLength: 1 + type: string + required: + - key + - value + type: object + maxItems: 20 + type: array + x-kubernetes-list-map-keys: + - key + x-kubernetes-list-type: map + required: + - region + type: object + aws: + description: AWS contains settings specific to the Amazon Web Services infrastructure provider. + properties: + region: + description: region holds the default AWS region for new AWS resources created by the cluster. + type: string + resourceTags: + description: resourceTags is a list of additional tags to apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources. AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags available for the user. + items: + description: AWSResourceTag is a tag to apply to AWS resources created for the cluster. + properties: + key: + description: key is the key of the tag. + maxLength: 128 + minLength: 1 + pattern: ^[0-9A-Za-z_.:/=+-@]+$ + type: string + value: + description: value is the value of the tag. Some AWS services do not support empty values. Since tags are added to resources in many services, the length of the tag value must meet the requirements of all services. + maxLength: 256 + minLength: 1 + pattern: ^[0-9A-Za-z_.:/=+-@]+$ + type: string + required: + - key + - value + type: object + maxItems: 25 + type: array + serviceEndpoints: + description: ServiceEndpoints list contains custom endpoints which will override the default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service. + items: + description: AWSServiceEndpoint stores the configuration of a custom url to override existing defaults of AWS Services. + properties: + name: + description: name is the name of the AWS service.
The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html This must be provided and cannot be empty. + pattern: ^[a-z0-9-]+$ + type: string + url: + description: url is a fully qualified URI with scheme https that overrides the default generated endpoint for a client. This must be provided and cannot be empty. + pattern: ^https:// + type: string + type: object + type: array + type: object + azure: + description: Azure contains settings specific to the Azure infrastructure provider. + properties: + armEndpoint: + description: armEndpoint specifies a URL to use for resource management in non-sovereign clouds such as Azure Stack. + type: string + cloudName: + description: cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK with the appropriate Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`. + enum: + - "" + - AzurePublicCloud + - AzureUSGovernmentCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureStackCloud + type: string + networkResourceGroupName: + description: networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. If empty, the value is the same as ResourceGroupName. + type: string + resourceGroupName: + description: resourceGroupName is the Resource Group for new Azure resources created for the cluster. + type: string + resourceTags: + description: resourceTags is a list of additional tags to apply to Azure resources created for the cluster. See https://docs.microsoft.com/en-us/rest/api/resources/tags for information on tagging Azure resources. Due to limitations on Automation, Content Delivery Network, and DNS Azure resources, a maximum of 15 tags may be applied. OpenShift reserves 5 tags for internal use, allowing 10 tags for user configuration. + items: + description: AzureResourceTag is a tag to apply to Azure resources created for the cluster. + properties: + key: + description: key is the key part of the tag. A tag key can have a maximum of 128 characters and cannot be empty. Key must begin with a letter, end with a letter, number or underscore, and must contain only alphanumeric characters and the following special characters `_ . -`. + maxLength: 128 + minLength: 1 + pattern: ^[a-zA-Z]([0-9A-Za-z_.-]*[0-9A-Za-z_])?$ + type: string + value: + description: 'value is the value part of the tag. A tag value can have a maximum of 256 characters and cannot be empty. Value must contain only alphanumeric characters and the following special characters `_ + , - . / : ; < = > ? @`.' + maxLength: 256 + minLength: 1 + pattern: ^[0-9A-Za-z_.=+-@]+$ + type: string + required: + - key + - value + type: object + maxItems: 10 + type: array + x-kubernetes-validations: + - message: resourceTags are immutable and may only be configured during installation + rule: self.all(x, x in oldSelf) && oldSelf.all(x, x in self) + type: object + x-kubernetes-validations: + - message: resourceTags may only be configured during installation + rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)' + baremetal: + description: BareMetal contains settings specific to the BareMetal platform. + properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking.
It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. \n Deprecated: Use APIServerInternalIPs instead." + type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one. + format: ip + items: + type: string + maxItems: 2 + type: array + ingressIP: + description: "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. \n Deprecated: Use IngressIPs instead." + type: string + ingressIPs: + description: ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one. + format: ip + items: + type: string + maxItems: 2 + type: array + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer used by the cluster is configured. + properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer used by the cluster on BareMetal platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for BareMetal deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster. + type: string + type: object + equinixMetal: + description: EquinixMetal contains settings specific to the Equinix Metal infrastructure provider. + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. 
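As a concrete reading of the baremetal status fields above, a dual-stack cluster's platformStatus could look like this sketch; the addresses are documentation examples, not values from any real cluster:

  platformStatus:
    type: BareMetal
    baremetal:
      apiServerInternalIPs:
      - 192.0.2.10
      - 2001:db8::10
      ingressIPs:
      - 192.0.2.11
      - 2001:db8::11
      loadBalancer:
        type: OpenShiftManagedDefault
      nodeDNSIP: 192.0.2.12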
+ type: string + type: object + external: + description: External contains settings specific to the generic External infrastructure provider. + properties: + cloudControllerManager: + description: cloudControllerManager contains settings specific to the external Cloud Controller Manager (a.k.a. CCM or CPI). When omitted, new nodes will not be tainted and no extra initialization from the cloud controller manager is expected. + properties: + state: + description: "state determines whether or not an external Cloud Controller Manager is expected to be installed within the cluster. https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager \n Valid values are \"External\", \"None\" and omitted. When set to \"External\", new nodes will be tainted as uninitialized when created, preventing them from running workloads until they are initialized by the cloud controller manager. When omitted or set to \"None\", new nodes will not be tainted and no extra initialization from the cloud controller manager is expected." + enum: + - "" + - External + - None + type: string + x-kubernetes-validations: + - message: state is immutable once set + rule: self == oldSelf + type: object + x-kubernetes-validations: + - message: state may not be added or removed once set + rule: (has(self.state) == has(oldSelf.state)) || (!has(oldSelf.state) && self.state != "External") + type: object + x-kubernetes-validations: + - message: cloudControllerManager may not be added or removed once set + rule: has(self.cloudControllerManager) == has(oldSelf.cloudControllerManager) + gcp: + description: GCP contains settings specific to the Google Cloud Platform infrastructure provider. + properties: + projectID: + description: projectID is the Project ID for new GCP resources created for the cluster. + type: string + region: + description: region holds the region for new GCP resources created for the cluster. + type: string + resourceLabels: + description: resourceLabels is a list of additional labels to apply to GCP resources created for the cluster. See https://cloud.google.com/compute/docs/labeling-resources for information on labeling GCP resources. GCP supports a maximum of 64 labels per resource. OpenShift reserves 32 labels for internal use, allowing 32 labels for user configuration. + items: + description: GCPResourceLabel is a label to apply to GCP resources created for the cluster. + properties: + key: + description: key is the key part of the label. A label key can have a maximum of 63 characters and cannot be empty. Label key must begin with a lowercase letter, and must contain only lowercase letters, numeric characters, and the following special characters `_-`. Label key must not have the reserved prefixes `kubernetes-io` and `openshift-io`. + maxLength: 63 + minLength: 1 + pattern: ^[a-z][0-9a-z_-]+$ + type: string + x-kubernetes-validations: + - message: label keys must not start with either `openshift-io` or `kubernetes-io` + rule: '!self.startsWith(''openshift-io'') && !self.startsWith(''kubernetes-io'')' + value: + description: value is the value part of the label. A label value can have a maximum of 63 characters and cannot be empty. Value must contain only lowercase letters, numeric characters, and the following special characters `_-`.
+ maxLength: 63 + minLength: 1 + pattern: ^[0-9a-z_-]+$ + type: string + required: + - key + - value + type: object + maxItems: 32 + type: array + x-kubernetes-list-map-keys: + - key + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: resourceLabels are immutable and may only be configured during installation + rule: self.all(x, x in oldSelf) && oldSelf.all(x, x in self) + resourceTags: + description: resourceTags is a list of additional tags to apply to GCP resources created for the cluster. See https://cloud.google.com/resource-manager/docs/tags/tags-overview for information on tagging GCP resources. GCP supports a maximum of 50 tags per resource. + items: + description: GCPResourceTag is a tag to apply to GCP resources created for the cluster. + properties: + key: + description: key is the key part of the tag. A tag key can have a maximum of 63 characters and cannot be empty. Tag key must begin and end with an alphanumeric character, and must contain only uppercase, lowercase alphanumeric characters, and the following special characters `._-`. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z0-9]([0-9A-Za-z_.-]{0,61}[a-zA-Z0-9])?$ + type: string + parentID: + description: 'parentID is the ID of the hierarchical resource where the tags are defined, e.g. at the Organization or the Project level. To find the Organization or Project ID refer to the following pages: https://cloud.google.com/resource-manager/docs/creating-managing-organization#retrieving_your_organization_id, https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects. An OrganizationID must consist of decimal numbers, and cannot have leading zeroes. A ProjectID must be 6 to 30 characters in length, can only contain lowercase letters, numbers, and hyphens, and must start with a letter, and cannot end with a hyphen.' + maxLength: 32 + minLength: 1 + pattern: (^[1-9][0-9]{0,31}$)|(^[a-z][a-z0-9-]{4,28}[a-z0-9]$) + type: string + value: + description: value is the value part of the tag. A tag value can have a maximum of 63 characters and cannot be empty. Tag value must begin and end with an alphanumeric character, and must contain only uppercase, lowercase alphanumeric characters, and the following special characters `_-.@%=+:,*#&(){}[]` and spaces. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z0-9]([0-9A-Za-z_.@%=+:,*#&()\[\]{}\-\s]{0,61}[a-zA-Z0-9])?$ + type: string + required: + - key + - parentID + - value + type: object + maxItems: 50 + type: array + x-kubernetes-list-map-keys: + - key + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: resourceTags are immutable and may only be configured during installation + rule: self.all(x, x in oldSelf) && oldSelf.all(x, x in self) + type: object + x-kubernetes-validations: + - message: resourceLabels may only be configured during installation + rule: '!has(oldSelf.resourceLabels) && !has(self.resourceLabels) || has(oldSelf.resourceLabels) && has(self.resourceLabels)' + - message: resourceTags may only be configured during installation + rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)' + ibmcloud: + description: IBMCloud contains settings specific to the IBMCloud infrastructure provider. 
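The GCP labelling and tagging fields above admit, for example, a status stanza like the following sketch; the project, label, and tag values are made up but satisfy the documented patterns (the parentID here uses the numeric OrganizationID form):

  platformStatus:
    type: GCP
    gcp:
      projectID: example-project-123456
      region: us-central1
      resourceLabels:
      - key: environment
        value: production
      resourceTags:
      - parentID: "123456789012"
        key: cost-center
        value: engineering-42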
+ properties: + cisInstanceCRN: + description: CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain + type: string + dnsInstanceCRN: + description: DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain + type: string + location: + description: Location is where the cluster has been deployed + type: string + providerType: + description: ProviderType indicates the type of cluster that was created + type: string + resourceGroupName: + description: ResourceGroupName is the Resource Group for new IBMCloud resources created for the cluster. + type: string + type: object + kubevirt: + description: Kubevirt contains settings specific to the kubevirt infrastructure provider. + properties: + apiServerInternalIP: + description: apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. + type: string + ingressIP: + description: ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. + type: string + type: object + nutanix: + description: Nutanix contains settings specific to the Nutanix infrastructure provider. + properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. \n Deprecated: Use APIServerInternalIPs instead." + type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one. + format: ip + items: + type: string + maxItems: 2 + type: array + ingressIP: + description: "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. \n Deprecated: Use IngressIPs instead." + type: string + ingressIPs: + description: ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one. + format: ip + items: + type: string + maxItems: 2 + type: array + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer used by the cluster is configured. + properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer used by the cluster on Nutanix platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. 
When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + type: object + openstack: + description: OpenStack contains settings specific to the OpenStack infrastructure provider. + properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. \n Deprecated: Use APIServerInternalIPs instead." + type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one. + format: ip + items: + type: string + maxItems: 2 + type: array + cloudName: + description: cloudName is the name of the desired OpenStack cloud in the client configuration file (`clouds.yaml`). + type: string + ingressIP: + description: "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. \n Deprecated: Use IngressIPs instead." + type: string + ingressIPs: + description: ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one. + format: ip + items: + type: string + maxItems: 2 + type: array + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer used by the cluster is configured. + properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer used by the cluster on OpenStack platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault. 
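The loadBalancer stanza recurs across the on-premises platforms with identical semantics; under this schema, opting out of the openshift-managed static pods would be expressed the same way on each of them, for example (sketch only):

  platformStatus:
    type: OpenStack
    openstack:
      loadBalancer:
        type: UserManaged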
+ enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for OpenStack deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster. + type: string + type: object + ovirt: + description: Ovirt contains settings specific to the oVirt infrastructure provider. + properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. \n Deprecated: Use APIServerInternalIPs instead." + type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one. + format: ip + items: + type: string + maxItems: 2 + type: array + ingressIP: + description: "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. \n Deprecated: Use IngressIPs instead." + type: string + ingressIPs: + description: ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one. + format: ip + items: + type: string + maxItems: 2 + type: array + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer used by the cluster is configured. + properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer used by the cluster on Ovirt platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + nodeDNSIP: + description: 'deprecated: as of 4.6, this field is no longer set or honored. It will be removed in a future release.' 
+ type: string + type: object + powervs: + description: PowerVS contains settings specific to the Power Systems Virtual Servers infrastructure provider. + properties: + cisInstanceCRN: + description: CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain + type: string + dnsInstanceCRN: + description: DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain + type: string + region: + description: region holds the default Power VS region for new Power VS resources created by the cluster. + type: string + resourceGroup: + description: 'resourceGroup is the resource group name for new IBMCloud resources created for a cluster. The resource group specified here will be used by cluster-image-registry-operator to set up a COS Instance in IBMCloud for the cluster registry. More about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs. When omitted, the image registry operator won''t be able to configure storage, which results in the image registry cluster operator not being in an available state.' + maxLength: 40 + pattern: ^[a-zA-Z0-9-_ ]+$ + type: string + x-kubernetes-validations: + - message: resourceGroup is immutable once set + rule: oldSelf == '' || self == oldSelf + serviceEndpoints: + description: serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service. + items: + description: PowervsServiceEndpoint stores the configuration of a custom url to override existing defaults of PowerVS Services. + properties: + name: + description: name is the name of the Power VS service. Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud + pattern: ^[a-z0-9-]+$ + type: string + url: + description: url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. + format: uri + pattern: ^https:// + type: string + required: + - name + - url + type: object + type: array + zone: + description: 'zone holds the default zone for the new Power VS resources created by the cluster. Note: Currently only single-zone OCP clusters are supported' + type: string + type: object + x-kubernetes-validations: + - message: cannot unset resourceGroup once set + rule: '!has(oldSelf.resourceGroup) || has(self.resourceGroup)' + type: + description: "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform. \n This value will be synced with to the `status.platform` and `status.platformStatus.type`. Currently this value cannot be changed once set." 
+ enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + type: string + vsphere: + description: VSphere contains settings specific to the VSphere infrastructure provider. + properties: + apiServerInternalIP: + description: "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. \n Deprecated: Use APIServerInternalIPs instead." + type: string + apiServerInternalIPs: + description: apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one. + format: ip + items: + type: string + maxItems: 2 + type: array + ingressIP: + description: "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. \n Deprecated: Use IngressIPs instead." + type: string + ingressIPs: + description: ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one. + format: ip + items: + type: string + maxItems: 2 + type: array + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer used by the cluster is configured. + properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer used by the cluster on VSphere platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object + nodeDNSIP: + description: nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for vSphere deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster. 
+ type: string + type: object + type: object + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml-patch b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml-patch new file mode 100644 index 000000000..d127130ad --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml-patch @@ -0,0 +1,24 @@ +- op: add + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/vcenters/items/properties/server/anyOf + value: + - format: ipv4 + - format: ipv6 + - format: hostname +- op: add + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/failureDomains/items/properties/server/anyOf + value: + - format: ipv4 + - format: ipv6 + - format: hostname +- op: add + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/external/properties/excludeNetworkSubnetCidr/items/format + value: cidr +- op: add + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/external/properties/networkSubnetCidr/items/format + value: cidr +- op: add + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/internal/properties/excludeNetworkSubnetCidr/items/format + value: cidr +- op: add + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/internal/properties/networkSubnetCidr/items/format + value: cidr diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-Default.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-Default.crd.yaml index fe57bddfc..64a54d5c5 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-Default.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-Default.crd.yaml @@ -21,9 +21,6 @@ spec: schema: openAPIV3Schema: description: "Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster` \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." - type: object - required: - - spec properties: apiVersion: description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' @@ -35,43 +32,41 @@ spec: type: object spec: description: spec holds user settable values for configuration - type: object properties: cloudConfig: description: "cloudConfig is a reference to a ConfigMap containing the cloud provider configuration file. This configuration file is used to configure the Kubernetes cloud provider integration when using the built-in cloud provider integration or the external cloud controller manager. 
The namespace for this config map is openshift-config. \n cloudConfig should only be consumed by the kube_cloud_config controller. The controller is responsible for using the user configuration in the spec for various platforms and combining that with the user provided ConfigMap in this field to create a stitched kube cloud config. The controller generates a ConfigMap `kube-cloud-config` in `openshift-config-managed` namespace with the kube cloud config is stored in `cloud.conf` key. All the clients are expected to use the generated ConfigMap only." - type: object properties: key: description: Key allows pointing to a specific key/value inside of the configmap. This is useful for logical file references. type: string name: type: string + type: object platformSpec: description: platformSpec holds desired information specific to the underlying infrastructure provider. - type: object properties: alibabaCloud: description: AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. type: object aws: description: AWS contains settings specific to the Amazon Web Services infrastructure provider. - type: object properties: serviceEndpoints: description: serviceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service. - type: array items: description: AWSServiceEndpoint store the configuration of a custom url to override existing defaults of AWS Services. - type: object properties: name: description: name is the name of the AWS service. The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html This must be provided and cannot be empty. - type: string pattern: ^[a-z0-9-]+$ + type: string url: description: url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. - type: string pattern: ^https:// + type: string + type: object + type: array + type: object azure: description: Azure contains settings specific to the Azure infrastructure provider. type: object @@ -83,26 +78,15 @@ spec: type: object external: description: ExternalPlatformType represents generic infrastructure provider. Platform-specific components should be supplemented separately. - type: object properties: - cloudControllerManager: - description: CloudControllerManager contains settings specific to the external Cloud Controller Manager (a.k.a. CCM or CPI) - type: object - properties: - state: - description: "state determines whether or not an external Cloud Controller Manager is expected to be installed within the cluster. https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager \n When set to \"External\", new nodes will be tainted as uninitialized when created, preventing them from running workloads until they are initialized by the cloud controller manager. When omitted or set to \"None\", new nodes will be not tainted and no extra initialization from the cloud controller manager is expected." - type: string - enum: - - "" - - External - - None platformName: + default: Unknown description: PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. This field is solely for informational and reporting purposes and is not expected to be used for decision-making. 
type: string - default: Unknown x-kubernetes-validations: - - rule: oldSelf == 'Unknown' || self == oldSelf - message: platform name cannot be changed once set + - message: platform name cannot be changed once set + rule: oldSelf == 'Unknown' || self == oldSelf + type: object gcp: description: GCP contains settings specific to the Google Cloud Platform infrastructure provider. type: object @@ -114,62 +98,62 @@ spec: type: object nutanix: description: Nutanix contains settings specific to the Nutanix infrastructure provider. - type: object - required: - - prismCentral - - prismElements properties: prismCentral: description: prismCentral holds the endpoint address and port to access the Nutanix Prism Central. When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list. - type: object - required: - - address - - port properties: address: description: address is the endpoint address (DNS name or IP address) of the Nutanix Prism Central or Element (cluster) - type: string maxLength: 256 + type: string port: description: port is the port number to access the Nutanix Prism Central or Element (cluster) - type: integer format: int32 maximum: 65535 minimum: 1 + type: integer + required: + - address + - port + type: object prismElements: description: prismElements holds one or more endpoint address and port data to access the Nutanix Prism Elements (clusters) of the Nutanix Prism Central. Currently we only support one Prism Element (cluster) for an OpenShift cluster, where all the Nutanix resources (VMs, subnets, volumes, etc.) used in the OpenShift cluster are located. In the future, we may support Nutanix resources (VMs, etc.) spread over multiple Prism Elements (clusters) of the Prism Central. - type: array items: description: NutanixPrismElementEndpoint holds the name and endpoint data for a Prism Element (cluster) - type: object - required: - - endpoint - - name properties: endpoint: description: endpoint holds the endpoint address and port data of the Prism Element (cluster). When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy. Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list. - type: object - required: - - address - - port properties: address: description: address is the endpoint address (DNS name or IP address) of the Nutanix Prism Central or Element (cluster) - type: string maxLength: 256 + type: string port: description: port is the port number to access the Nutanix Prism Central or Element (cluster) - type: integer format: int32 maximum: 65535 minimum: 1 + type: integer + required: + - address + - port + type: object name: description: name is the name of the Prism Element (cluster). This value will correspond with the cluster field configured on other resources (eg Machines, PVCs, etc). - type: string maxLength: 256 + type: string + required: + - endpoint + - name + type: object + type: array x-kubernetes-list-map-keys: - name x-kubernetes-list-type: map + required: + - prismCentral + - prismElements + type: object openstack: description: OpenStack contains settings specific to the OpenStack infrastructure provider. type: object @@ -178,33 +162,32 @@ spec: type: object powervs: description: PowerVS contains settings specific to the IBM Power Systems Virtual Servers infrastructure provider. 
- type: object properties: serviceEndpoints: description: serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service. - type: array items: description: PowervsServiceEndpoint stores the configuration of a custom url to override existing defaults of PowerVS Services. - type: object - required: - - name - - url properties: name: description: name is the name of the Power VS service. Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - type: string pattern: ^[a-z0-9-]+$ + type: string url: description: url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. - type: string format: uri pattern: ^https:// + type: string + required: + - name + - url + type: object + type: array x-kubernetes-list-map-keys: - name x-kubernetes-list-type: map + type: object type: description: type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Libvirt", "OpenStack", "VSphere", "oVirt", "KubeVirt", "EquinixMetal", "PowerVS", "AlibabaCloud", "Nutanix" and "None". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform. - type: string enum: - "" - AWS @@ -223,12 +206,166 @@ spec: - AlibabaCloud - Nutanix - External + type: string vsphere: description: VSphere contains settings specific to the VSphere infrastructure provider. + properties: + failureDomains: + description: failureDomains contains the definition of region, zone and the vCenter topology. If this is omitted failure domains (regions and zones) will not be used. + items: + description: VSpherePlatformFailureDomainSpec holds the region and zone failure domain and the vCenter topology of that failure domain. + properties: + name: + description: name defines the arbitrary but unique name of a failure domain. + maxLength: 256 + minLength: 1 + type: string + region: + description: region defines the name of a region tag that will be attached to a vCenter datacenter. The tag category in vCenter must be named openshift-region. + maxLength: 80 + minLength: 1 + type: string + server: + anyOf: + - format: ipv4 + - format: ipv6 + - format: hostname + description: server is the fully-qualified domain name or the IP address of the vCenter server. --- + maxLength: 255 + minLength: 1 + type: string + topology: + description: Topology describes a given failure domain using vSphere constructs + properties: + computeCluster: + description: computeCluster the absolute path of the vCenter cluster in which virtual machine will be located. The absolute path is of the form //host/. The maximum length of the path is 2048 characters. + maxLength: 2048 + pattern: ^/.*?/host/.*? + type: string + datacenter: + description: datacenter is the name of vCenter datacenter in which virtual machines will be located. The maximum length of the datacenter name is 80 characters. 
+ maxLength: 80 + type: string + datastore: + description: datastore is the absolute path of the datastore in which the virtual machine is located. The absolute path is of the form //datastore/ The maximum length of the path is 2048 characters. + maxLength: 2048 + pattern: ^/.*?/datastore/.*? + type: string + folder: + description: folder is the absolute path of the folder where virtual machines are located. The absolute path is of the form //vm/. The maximum length of the path is 2048 characters. + maxLength: 2048 + pattern: ^/.*?/vm/.*? + type: string + networks: + description: networks is the list of port group network names within this failure domain. Currently, we only support a single interface per RHCOS virtual machine. The available networks (port groups) can be listed using `govc ls 'network/*'` The single interface should be the absolute path of the form //network/. + items: + type: string + maxItems: 1 + minItems: 1 + type: array + resourcePool: + description: resourcePool is the absolute path of the resource pool where virtual machines will be created. The absolute path is of the form //host//Resources/. The maximum length of the path is 2048 characters. + maxLength: 2048 + pattern: ^/.*?/host/.*?/Resources.* + type: string + required: + - computeCluster + - datacenter + - datastore + - networks + type: object + zone: + description: zone defines the name of a zone tag that will be attached to a vCenter cluster. The tag category in vCenter must be named openshift-zone. + maxLength: 80 + minLength: 1 + type: string + required: + - name + - region + - server + - topology + - zone + type: object + type: array + nodeNetworking: + description: nodeNetworking contains the definition of internal and external network constraints for assigning the node's networking. If this field is omitted, networking defaults to the legacy address selection behavior which is to only support a single address and return the first one found. + properties: + external: + description: external represents the network configuration of the node that is externally routable. + properties: + excludeNetworkSubnetCidr: + description: excludeNetworkSubnetCidr IP addresses in subnet ranges will be excluded when selecting the IP address from the VirtualMachine's VM for use in the status.addresses fields. --- + items: + format: cidr + type: string + type: array + network: + description: network VirtualMachine's VM Network names that will be used to when searching for status.addresses fields. Note that if internal.networkSubnetCIDR and external.networkSubnetCIDR are not set, then the vNIC associated to this network must only have a single IP address assigned to it. The available networks (port groups) can be listed using `govc ls 'network/*'` + type: string + networkSubnetCidr: + description: networkSubnetCidr IP address on VirtualMachine's network interfaces included in the fields' CIDRs that will be used in respective status.addresses fields. --- + items: + format: cidr + type: string + type: array + type: object + internal: + description: internal represents the network configuration of the node that is routable only within the cluster. + properties: + excludeNetworkSubnetCidr: + description: excludeNetworkSubnetCidr IP addresses in subnet ranges will be excluded when selecting the IP address from the VirtualMachine's VM for use in the status.addresses fields. 
--- + items: + format: cidr + type: string + type: array + network: + description: network VirtualMachine's VM Network names that will be used to when searching for status.addresses fields. Note that if internal.networkSubnetCIDR and external.networkSubnetCIDR are not set, then the vNIC associated to this network must only have a single IP address assigned to it. The available networks (port groups) can be listed using `govc ls 'network/*'` + type: string + networkSubnetCidr: + description: networkSubnetCidr IP address on VirtualMachine's network interfaces included in the fields' CIDRs that will be used in respective status.addresses fields. --- + items: + format: cidr + type: string + type: array + type: object + type: object + vcenters: + description: vcenters holds the connection details for services to communicate with vCenter. Currently, only a single vCenter is supported. --- + items: + description: VSpherePlatformVCenterSpec stores the vCenter connection fields. This is used by the vSphere CCM. + properties: + datacenters: + description: The vCenter Datacenters in which the RHCOS vm guests are located. This field will be used by the Cloud Controller Manager. Each datacenter listed here should be used within a topology. + items: + type: string + minItems: 1 + type: array + port: + description: port is the TCP port that will be used to communicate to the vCenter endpoint. When omitted, this means the user has no opinion and it is up to the platform to choose a sensible default, which is subject to change over time. + format: int32 + maximum: 32767 + minimum: 1 + type: integer + server: + anyOf: + - format: ipv4 + - format: ipv6 + - format: hostname + description: server is the fully-qualified domain name or the IP address of the vCenter server. --- + maxLength: 255 + type: string + required: + - datacenters + - server + type: object + maxItems: 1 + minItems: 0 + type: array type: object + type: object + type: object status: description: status holds observed values from the cluster. They may not be overridden. - type: object properties: apiServerInternalURI: description: apiServerInternalURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerInternalURL can be used by components like kubelets, to contact the Kubernetes API server using the infrastructure provider rather than Kubernetes networking. @@ -237,13 +374,13 @@ spec: description: apiServerURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerURL can be used by components like the web console to tell users where to find the Kubernetes API. type: string controlPlaneTopology: - description: controlPlaneTopology expresses the expectations for operands that normally run on control nodes. The default is 'HighlyAvailable', which represents the behavior operators have in a "normal" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation The 'External' mode indicates that the control plane is hosted externally to the cluster and that its components are not visible within the cluster. - type: string default: HighlyAvailable + description: controlPlaneTopology expresses the expectations for operands that normally run on control nodes. The default is 'HighlyAvailable', which represents the behavior operators have in a "normal" cluster. 
The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation The 'External' mode indicates that the control plane is hosted externally to the cluster and that its components are not visible within the cluster. enum: - HighlyAvailable - SingleReplica - External + type: string etcdDiscoveryDomain: description: 'etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery deprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release.' type: string @@ -251,15 +388,14 @@ spec: description: infrastructureName uniquely identifies a cluster with a human friendly name. Once set it should not be changed. Must be of max length 27 and must have only alphanumeric or hyphen characters. type: string infrastructureTopology: - description: 'infrastructureTopology expresses the expectations for infrastructure services that do not run on control plane nodes, usually indicated by a node selector for a `role` value other than `master`. The default is ''HighlyAvailable'', which represents the behavior operators have in a "normal" cluster. The ''SingleReplica'' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation NOTE: External topology mode is not applicable for this field.' - type: string default: HighlyAvailable + description: 'infrastructureTopology expresses the expectations for infrastructure services that do not run on control plane nodes, usually indicated by a node selector for a `role` value other than `master`. The default is ''HighlyAvailable'', which represents the behavior operators have in a "normal" cluster. The ''SingleReplica'' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation NOTE: External topology mode is not applicable for this field.' enum: - HighlyAvailable - SingleReplica + type: string platform: description: "platform is the underlying infrastructure provider for the cluster. \n Deprecated: Use platformStatus.type instead." - type: string enum: - "" - AWS @@ -278,103 +414,101 @@ spec: - AlibabaCloud - Nutanix - External + type: string platformStatus: description: platformStatus holds status information specific to the underlying infrastructure provider. - type: object properties: alibabaCloud: description: AlibabaCloud contains settings specific to the Alibaba Cloud infrastructure provider. - type: object - required: - - region properties: region: description: region specifies the region for Alibaba Cloud resources created for the cluster. - type: string pattern: ^[0-9A-Za-z-]+$ + type: string resourceGroupID: description: resourceGroupID is the ID of the resource group for the cluster. - type: string pattern: ^(rg-[0-9A-Za-z]+)?$ + type: string resourceTags: description: resourceTags is a list of additional tags to apply to Alibaba Cloud resources created for the cluster. - type: array - maxItems: 20 items: description: AlibabaCloudResourceTag is the set of tags to add to apply to resources. - type: object - required: - - key - - value properties: key: description: key is the key of the tag. - type: string maxLength: 128 minLength: 1 + type: string value: description: value is the value of the tag. 
- type: string maxLength: 128 minLength: 1 + type: string + required: + - key + - value + type: object + maxItems: 20 + type: array x-kubernetes-list-map-keys: - key x-kubernetes-list-type: map + required: + - region + type: object aws: description: AWS contains settings specific to the Amazon Web Services infrastructure provider. - type: object properties: region: description: region holds the default AWS region for new AWS resources created by the cluster. type: string resourceTags: description: resourceTags is a list of additional tags to apply to AWS resources created for the cluster. See https://docs.aws.amazon.com/general/latest/gr/aws_tagging.html for information on tagging AWS resources. AWS supports a maximum of 50 tags per resource. OpenShift reserves 25 tags for its use, leaving 25 tags available for the user. - type: array - maxItems: 25 items: description: AWSResourceTag is a tag to apply to AWS resources created for the cluster. - type: object - required: - - key - - value properties: key: description: key is the key of the tag - type: string maxLength: 128 minLength: 1 pattern: ^[0-9A-Za-z_.:/=+-@]+$ + type: string value: description: value is the value of the tag. Some AWS service do not support empty values. Since tags are added to resources in many services, the length of the tag value must meet the requirements of all services. - type: string maxLength: 256 minLength: 1 pattern: ^[0-9A-Za-z_.:/=+-@]+$ + type: string + required: + - key + - value + type: object + maxItems: 25 + type: array serviceEndpoints: description: ServiceEndpoints list contains custom endpoints which will override default service endpoint of AWS Services. There must be only one ServiceEndpoint for a service. - type: array items: description: AWSServiceEndpoint store the configuration of a custom url to override existing defaults of AWS Services. - type: object properties: name: description: name is the name of the AWS service. The list of all the service names can be found at https://docs.aws.amazon.com/general/latest/gr/aws-service-information.html This must be provided and cannot be empty. - type: string pattern: ^[a-z0-9-]+$ + type: string url: description: url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. - type: string pattern: ^https:// + type: string + type: object + type: array + type: object azure: description: Azure contains settings specific to the Azure infrastructure provider. - type: object properties: armEndpoint: description: armEndpoint specifies a URL to use for resource management in non-soverign clouds such as Azure Stack. type: string cloudName: description: cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK with the appropriate Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`. - type: string enum: - "" - AzurePublicCloud @@ -382,42 +516,72 @@ spec: - AzureChinaCloud - AzureGermanCloud - AzureStackCloud + type: string networkResourceGroupName: description: networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. If empty, the value is same as ResourceGroupName. type: string resourceGroupName: description: resourceGroupName is the Resource Group for new Azure resources created for the cluster. type: string + resourceTags: + description: resourceTags is a list of additional tags to apply to Azure resources created for the cluster. 
See https://docs.microsoft.com/en-us/rest/api/resources/tags for information on tagging Azure resources. Due to limitations on Automation, Content Delivery Network, DNS Azure resources, a maximum of 15 tags may be applied. OpenShift reserves 5 tags for internal use, allowing 10 tags for user configuration. + items: + description: AzureResourceTag is a tag to apply to Azure resources created for the cluster. + properties: + key: + description: key is the key part of the tag. A tag key can have a maximum of 128 characters and cannot be empty. Key must begin with a letter, end with a letter, number or underscore, and must contain only alphanumeric characters and the following special characters `_ . -`. + maxLength: 128 + minLength: 1 + pattern: ^[a-zA-Z]([0-9A-Za-z_.-]*[0-9A-Za-z_])?$ + type: string + value: + description: 'value is the value part of the tag. A tag value can have a maximum of 256 characters and cannot be empty. Value must contain only alphanumeric characters and the following special characters `_ + , - . / : ; < = > ? @`.' + maxLength: 256 + minLength: 1 + pattern: ^[0-9A-Za-z_.=+-@]+$ + type: string + required: + - key + - value + type: object + maxItems: 10 + type: array + x-kubernetes-validations: + - message: resourceTags are immutable and may only be configured during installation + rule: self.all(x, x in oldSelf) && oldSelf.all(x, x in self) + type: object + x-kubernetes-validations: + - message: resourceTags may only be configured during installation + rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)' baremetal: description: BareMetal contains settings specific to the BareMetal platform. - type: object properties: apiServerInternalIP: description: "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. \n Deprecated: Use APIServerInternalIPs instead." type: string apiServerInternalIPs: description: apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one. - type: array format: ip - maxItems: 2 items: type: string + maxItems: 2 + type: array ingressIP: description: "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. \n Deprecated: Use IngressIPs instead." type: string ingressIPs: description: ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one. - type: array format: ip - maxItems: 2 items: type: string + maxItems: 2 + type: array nodeDNSIP: description: nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for BareMetal deployments. 
In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster. type: string + type: object equinixMetal: description: EquinixMetal contains settings specific to the Equinix Metal infrastructure provider. - type: object properties: apiServerInternalIP: description: apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. @@ -425,12 +589,33 @@ spec: ingressIP: description: ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. type: string + type: object external: description: External contains settings specific to the generic External infrastructure provider. + properties: + cloudControllerManager: + description: cloudControllerManager contains settings specific to the external Cloud Controller Manager (a.k.a. CCM or CPI). When omitted, new nodes will be not tainted and no extra initialization from the cloud controller manager is expected. + properties: + state: + description: "state determines whether or not an external Cloud Controller Manager is expected to be installed within the cluster. https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager \n Valid values are \"External\", \"None\" and omitted. When set to \"External\", new nodes will be tainted as uninitialized when created, preventing them from running workloads until they are initialized by the cloud controller manager. When omitted or set to \"None\", new nodes will be not tainted and no extra initialization from the cloud controller manager is expected." + enum: + - "" + - External + - None + type: string + x-kubernetes-validations: + - message: state is immutable once set + rule: self == oldSelf + type: object + x-kubernetes-validations: + - message: state may not be added or removed once set + rule: (has(self.state) == has(oldSelf.state)) || (!has(oldSelf.state) && self.state != "External") type: object + x-kubernetes-validations: + - message: cloudControllerManager may not be added or removed once set + rule: has(self.cloudControllerManager) == has(oldSelf.cloudControllerManager) gcp: description: GCP contains settings specific to the Google Cloud Platform infrastructure provider. - type: object properties: projectID: description: resourceGroupName is the Project ID for new GCP resources created for the cluster. @@ -438,9 +623,9 @@ spec: region: description: region holds the region for new GCP resources created for the cluster. type: string + type: object ibmcloud: description: IBMCloud contains settings specific to the IBMCloud infrastructure provider. - type: object properties: cisInstanceCRN: description: CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain @@ -457,9 +642,9 @@ spec: resourceGroupName: description: ResourceGroupName is the Resource Group for new IBMCloud resources created for the cluster. type: string + type: object kubevirt: description: Kubevirt contains settings specific to the kubevirt infrastructure provider. 
- type: object properties: apiServerInternalIP: description: apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. @@ -467,44 +652,44 @@ spec: ingressIP: description: ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. type: string + type: object nutanix: description: Nutanix contains settings specific to the Nutanix infrastructure provider. - type: object properties: apiServerInternalIP: description: "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. \n Deprecated: Use APIServerInternalIPs instead." type: string apiServerInternalIPs: description: apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one. - type: array format: ip - maxItems: 2 items: type: string + maxItems: 2 + type: array ingressIP: description: "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. \n Deprecated: Use IngressIPs instead." type: string ingressIPs: description: ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one. - type: array format: ip - maxItems: 2 items: type: string + maxItems: 2 + type: array + type: object openstack: description: OpenStack contains settings specific to the OpenStack infrastructure provider. - type: object properties: apiServerInternalIP: description: "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. \n Deprecated: Use APIServerInternalIPs instead." type: string apiServerInternalIPs: description: apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one. - type: array format: ip - maxItems: 2 items: type: string + maxItems: 2 + type: array cloudName: description: cloudName is the name of the desired OpenStack cloud in the client configuration file (`clouds.yaml`). 
type: string @@ -513,44 +698,60 @@ spec: type: string ingressIPs: description: ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one. - type: array format: ip - maxItems: 2 items: type: string + maxItems: 2 + type: array + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer used by the cluster is configured. + properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer used by the cluster on OpenStack platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object nodeDNSIP: description: nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for OpenStack deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster. type: string + type: object ovirt: description: Ovirt contains settings specific to the oVirt infrastructure provider. - type: object properties: apiServerInternalIP: description: "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. \n Deprecated: Use APIServerInternalIPs instead." type: string apiServerInternalIPs: description: apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one. - type: array format: ip - maxItems: 2 items: type: string + maxItems: 2 + type: array ingressIP: description: "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. \n Deprecated: Use IngressIPs instead." type: string ingressIPs: description: ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one. 
- type: array format: ip - maxItems: 2 items: type: string + maxItems: 2 + type: array nodeDNSIP: description: 'deprecated: as of 4.6, this field is no longer set or honored. It will be removed in a future release.' type: string + type: object powervs: description: PowerVS contains settings specific to the Power Systems Virtual Servers infrastructure provider. - type: object properties: cisInstanceCRN: description: CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain @@ -561,31 +762,42 @@ spec: region: description: region holds the default Power VS region for new Power VS resources created by the cluster. type: string + resourceGroup: + description: 'resourceGroup is the resource group name for new IBMCloud resources created for a cluster. The resource group specified here will be used by cluster-image-registry-operator to set up a COS Instance in IBMCloud for the cluster registry. More about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs. When omitted, the image registry operator won''t be able to configure storage, which results in the image registry cluster operator not being in an available state.' + maxLength: 40 + pattern: ^[a-zA-Z0-9-_ ]+$ + type: string + x-kubernetes-validations: + - message: resourceGroup is immutable once set + rule: oldSelf == '' || self == oldSelf serviceEndpoints: description: serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service. - type: array items: description: PowervsServiceEndpoint stores the configuration of a custom url to override existing defaults of PowerVS Services. - type: object - required: - - name - - url properties: name: description: name is the name of the Power VS service. Few of the services are IAM - https://cloud.ibm.com/apidocs/iam-identity-token-api ResourceController - https://cloud.ibm.com/apidocs/resource-controller/resource-controller Power Cloud - https://cloud.ibm.com/apidocs/power-cloud - type: string pattern: ^[a-z0-9-]+$ + type: string url: description: url is fully qualified URI with scheme https, that overrides the default generated endpoint for a client. This must be provided and cannot be empty. - type: string format: uri pattern: ^https:// + type: string + required: + - name + - url + type: object + type: array zone: description: 'zone holds the default zone for the new Power VS resources created by the cluster. Note: Currently only single-zone OCP clusters are supported' type: string + type: object + x-kubernetes-validations: + - message: cannot unset resourceGroup once set + rule: '!has(oldSelf.resourceGroup) || has(self.resourceGroup)' type: description: "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform. \n This value will be synced with to the `status.platform` and `status.platformStatus.type`. Currently this value cannot be changed once set." 
- type: string enum: - "" - AWS @@ -604,33 +816,39 @@ spec: - AlibabaCloud - Nutanix - External + type: string vsphere: description: VSphere contains settings specific to the VSphere infrastructure provider. - type: object properties: apiServerInternalIP: description: "apiServerInternalIP is an IP address to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. It is the IP that the Infrastructure.status.apiServerInternalURI points to. It is the IP for a self-hosted load balancer in front of the API servers. \n Deprecated: Use APIServerInternalIPs instead." type: string apiServerInternalIPs: description: apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one. - type: array format: ip - maxItems: 2 items: type: string + maxItems: 2 + type: array ingressIP: description: "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names. \n Deprecated: Use IngressIPs instead." type: string ingressIPs: description: ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one. - type: array format: ip - maxItems: 2 items: type: string + maxItems: 2 + type: array nodeDNSIP: description: nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for vSphere deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster. 
type: string + type: object + type: object + type: object + required: + - spec + type: object served: true storage: true subresources: diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-Default.crd.yaml-patch b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-Default.crd.yaml-patch new file mode 100644 index 000000000..d127130ad --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-Default.crd.yaml-patch @@ -0,0 +1,24 @@ +- op: add + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/vcenters/items/properties/server/anyOf + value: + - format: ipv4 + - format: ipv6 + - format: hostname +- op: add + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/failureDomains/items/properties/server/anyOf + value: + - format: ipv4 + - format: ipv6 + - format: hostname +- op: add + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/external/properties/excludeNetworkSubnetCidr/items/format + value: cidr +- op: add + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/external/properties/networkSubnetCidr/items/format + value: cidr +- op: add + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/internal/properties/excludeNetworkSubnetCidr/items/format + value: cidr +- op: add + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/platformSpec/properties/vsphere/properties/nodeNetworking/properties/internal/properties/networkSubnetCidr/items/format + value: cidr diff --git a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml index 01eeb0928..0698bc680 100644 --- a/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml +++ b/vendor/github.com/openshift/api/config/v1/0000_10_config-operator_01_infrastructure-TechPreviewNoUpgrade.crd.yaml @@ -79,17 +79,6 @@ spec: external: description: ExternalPlatformType represents generic infrastructure provider. Platform-specific components should be supplemented separately. properties: - cloudControllerManager: - description: CloudControllerManager contains settings specific to the external Cloud Controller Manager (a.k.a. CCM or CPI) - properties: - state: - description: "state determines whether or not an external Cloud Controller Manager is expected to be installed within the cluster. https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager \n When set to \"External\", new nodes will be tainted as uninitialized when created, preventing them from running workloads until they are initialized by the cloud controller manager. When omitted or set to \"None\", new nodes will be not tainted and no extra initialization from the cloud controller manager is expected." 
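The new 0000_10_config-operator_01_infrastructure-Default.crd.yaml-patch file above carries JSON-Patch-style `add` operations that the openshift/api CRD build applies on top of the generated schema, layering in `anyOf` server formats (ipv4/ipv6/hostname) and `cidr` item formats that are awkward to express as generator markers. Note that the `name=v1` path segment is an extension of RFC 6902 that selects a list entry by key; a plain JSON Pointer uses numeric indices instead. A minimal sketch of the same mechanism using the evanphx/json-patch module this repository already vendors (the document fragment is illustrative):

package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	// Stand-in for a fragment of the generated CRD schema.
	doc := []byte(`{"items":{"type":"string"}}`)

	// Plain RFC 6902 equivalent of one of the vendored yaml-patch ops.
	patch, err := jsonpatch.DecodePatch([]byte(`[{"op":"add","path":"/items/format","value":"cidr"}]`))
	if err != nil {
		panic(err)
	}

	out, err := patch.Apply(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // the fragment now carries "format":"cidr" under items
}
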
- enum: - - "" - - External - - None - type: string - type: object platformName: default: Unknown description: PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. This field is solely for informational and reporting purposes and is not expected to be used for decision-making. @@ -392,6 +381,13 @@ spec: - SingleReplica - External type: string + cpuPartitioning: + default: None + description: cpuPartitioning expresses if CPU partitioning is a currently enabled feature in the cluster. CPU Partitioning means that this cluster can support partitioning workloads to specific CPU Sets. Valid values are "None" and "AllNodes". When omitted, the default value is "None". The default value of "None" indicates that no nodes will be setup with CPU partitioning. The "AllNodes" value indicates that all nodes have been setup with CPU partitioning, and can then be further configured via the PerformanceProfile API. + enum: + - None + - AllNodes + type: string etcdDiscoveryDomain: description: 'etcdDiscoveryDomain is the domain used to fetch the SRV records for discovering etcd servers and clients. For more info: https://github.com/etcd-io/etcd/blob/329be66e8b3f9e2e6af83c123ff89297e49ebd15/Documentation/op-guide/clustering.md#dns-discovery deprecated: as of 4.7, this field is no longer set or honored. It will be removed in a future release.' type: string @@ -534,7 +530,36 @@ spec: resourceGroupName: description: resourceGroupName is the Resource Group for new Azure resources created for the cluster. type: string + resourceTags: + description: resourceTags is a list of additional tags to apply to Azure resources created for the cluster. See https://docs.microsoft.com/en-us/rest/api/resources/tags for information on tagging Azure resources. Due to limitations on Automation, Content Delivery Network, DNS Azure resources, a maximum of 15 tags may be applied. OpenShift reserves 5 tags for internal use, allowing 10 tags for user configuration. + items: + description: AzureResourceTag is a tag to apply to Azure resources created for the cluster. + properties: + key: + description: key is the key part of the tag. A tag key can have a maximum of 128 characters and cannot be empty. Key must begin with a letter, end with a letter, number or underscore, and must contain only alphanumeric characters and the following special characters `_ . -`. + maxLength: 128 + minLength: 1 + pattern: ^[a-zA-Z]([0-9A-Za-z_.-]*[0-9A-Za-z_])?$ + type: string + value: + description: 'value is the value part of the tag. A tag value can have a maximum of 256 characters and cannot be empty. Value must contain only alphanumeric characters and the following special characters `_ + , - . / : ; < = > ? @`.' + maxLength: 256 + minLength: 1 + pattern: ^[0-9A-Za-z_.=+-@]+$ + type: string + required: + - key + - value + type: object + maxItems: 10 + type: array + x-kubernetes-validations: + - message: resourceTags are immutable and may only be configured during installation + rule: self.all(x, x in oldSelf) && oldSelf.all(x, x in self) type: object + x-kubernetes-validations: + - message: resourceTags may only be configured during installation + rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)' baremetal: description: BareMetal contains settings specific to the BareMetal platform. 
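The Azure resourceTags hunk above enforces install-time-only configuration with two cooperating CEL rules: the list-level `self.all(x, x in oldSelf) && oldSelf.all(x, x in self)` rejects any add, remove, or edit by requiring the old and new lists to be equal as sets, while the object-level `has()` rule keeps the field from appearing or disappearing after creation. Such a rule can be sanity-checked outside the apiserver with cel-go (github.com/google/cel-go, not vendored here); a minimal sketch over string lists rather than full tag objects:

package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	env, err := cel.NewEnv(
		cel.Variable("self", cel.ListType(cel.StringType)),
		cel.Variable("oldSelf", cel.ListType(cel.StringType)),
	)
	if err != nil {
		panic(err)
	}

	// The set-equality transition rule from the CRD above.
	ast, iss := env.Compile(`self.all(x, x in oldSelf) && oldSelf.all(x, x in self)`)
	if iss != nil && iss.Err() != nil {
		panic(iss.Err())
	}
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}

	out, _, err := prg.Eval(map[string]any{
		"self":    []string{"a", "b"},
		"oldSelf": []string{"b", "a"}, // same set, different order: allowed
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // true; any added or removed element flips this to false
}
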
properties: @@ -558,6 +583,22 @@ spec: type: string maxItems: 2 type: array + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer used by the cluster is configured. + properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer used by the cluster on BareMetal platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object nodeDNSIP: description: nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for BareMetal deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster. type: string @@ -574,7 +615,28 @@ spec: type: object external: description: External contains settings specific to the generic External infrastructure provider. + properties: + cloudControllerManager: + description: cloudControllerManager contains settings specific to the external Cloud Controller Manager (a.k.a. CCM or CPI). When omitted, new nodes will be not tainted and no extra initialization from the cloud controller manager is expected. + properties: + state: + description: "state determines whether or not an external Cloud Controller Manager is expected to be installed within the cluster. https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager \n Valid values are \"External\", \"None\" and omitted. When set to \"External\", new nodes will be tainted as uninitialized when created, preventing them from running workloads until they are initialized by the cloud controller manager. When omitted or set to \"None\", new nodes will be not tainted and no extra initialization from the cloud controller manager is expected." + enum: + - "" + - External + - None + type: string + x-kubernetes-validations: + - message: state is immutable once set + rule: self == oldSelf + type: object + x-kubernetes-validations: + - message: state may not be added or removed once set + rule: (has(self.state) == has(oldSelf.state)) || (!has(oldSelf.state) && self.state != "External") type: object + x-kubernetes-validations: + - message: cloudControllerManager may not be added or removed once set + rule: has(self.cloudControllerManager) == has(oldSelf.cloudControllerManager) gcp: description: GCP contains settings specific to the Google Cloud Platform infrastructure provider. properties: @@ -584,7 +646,80 @@ spec: region: description: region holds the region for new GCP resources created for the cluster. 
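The loadBalancer stanza above is repeated for every on-prem platform touched by this patch (BareMetal, Nutanix, OpenStack, oVirt, VSphere) and combines a defaulted enum with a CEL immutability guard; because the apiserver applies the OpenShiftManagedDefault default on create, the `oldSelf == ''` arm mainly covers objects persisted before the field existed. An illustrative controller-gen declaration (assumed shape, not the exact upstream source):

package v1sketch

type PlatformLoadBalancerSketch struct {
	// type selects between the openshift-managed static-pod load balancer
	// and one managed out of band by the deployer.
	// +kubebuilder:default=OpenShiftManagedDefault
	// +kubebuilder:validation:Enum=OpenShiftManagedDefault;UserManaged
	// +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="type is immutable once set"
	// +optional
	Type string `json:"type,omitempty"`
}
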
type: string + resourceLabels: + description: resourceLabels is a list of additional labels to apply to GCP resources created for the cluster. See https://cloud.google.com/compute/docs/labeling-resources for information on labeling GCP resources. GCP supports a maximum of 64 labels per resource. OpenShift reserves 32 labels for internal use, allowing 32 labels for user configuration. + items: + description: GCPResourceLabel is a label to apply to GCP resources created for the cluster. + properties: + key: + description: key is the key part of the label. A label key can have a maximum of 63 characters and cannot be empty. Label key must begin with a lowercase letter, and must contain only lowercase letters, numeric characters, and the following special characters `_-`. Label key must not have the reserved prefixes `kubernetes-io` and `openshift-io`. + maxLength: 63 + minLength: 1 + pattern: ^[a-z][0-9a-z_-]+$ + type: string + x-kubernetes-validations: + - message: label keys must not start with either `openshift-io` or `kubernetes-io` + rule: '!self.startsWith(''openshift-io'') && !self.startsWith(''kubernetes-io'')' + value: + description: value is the value part of the label. A label value can have a maximum of 63 characters and cannot be empty. Value must contain only lowercase letters, numeric characters, and the following special characters `_-`. + maxLength: 63 + minLength: 1 + pattern: ^[0-9a-z_-]+$ + type: string + required: + - key + - value + type: object + maxItems: 32 + type: array + x-kubernetes-list-map-keys: + - key + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: resourceLabels are immutable and may only be configured during installation + rule: self.all(x, x in oldSelf) && oldSelf.all(x, x in self) + resourceTags: + description: resourceTags is a list of additional tags to apply to GCP resources created for the cluster. See https://cloud.google.com/resource-manager/docs/tags/tags-overview for information on tagging GCP resources. GCP supports a maximum of 50 tags per resource. + items: + description: GCPResourceTag is a tag to apply to GCP resources created for the cluster. + properties: + key: + description: key is the key part of the tag. A tag key can have a maximum of 63 characters and cannot be empty. Tag key must begin and end with an alphanumeric character, and must contain only uppercase, lowercase alphanumeric characters, and the following special characters `._-`. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z0-9]([0-9A-Za-z_.-]{0,61}[a-zA-Z0-9])?$ + type: string + parentID: + description: 'parentID is the ID of the hierarchical resource where the tags are defined, e.g. at the Organization or the Project level. To find the Organization or Project ID refer to the following pages: https://cloud.google.com/resource-manager/docs/creating-managing-organization#retrieving_your_organization_id, https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects. An OrganizationID must consist of decimal numbers, and cannot have leading zeroes. A ProjectID must be 6 to 30 characters in length, can only contain lowercase letters, numbers, and hyphens, and must start with a letter, and cannot end with a hyphen.' + maxLength: 32 + minLength: 1 + pattern: (^[1-9][0-9]{0,31}$)|(^[a-z][a-z0-9-]{4,28}[a-z0-9]$) + type: string + value: + description: value is the value part of the tag. A tag value can have a maximum of 63 characters and cannot be empty. 
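Unlike the Azure resourceTags earlier in this patch (a plain atomic array), the GCP resourceLabels above are generated as a map-type list, `x-kubernetes-list-type: map` keyed on `key`, so the apiserver can merge entries per key under server-side apply, and each key additionally carries a CEL guard against the reserved `openshift-io`/`kubernetes-io` prefixes. An illustrative declaration (assumed shape, not the exact upstream source):

package v1sketch

type GCPPlatformStatusSketch struct {
	// +listType=map
	// +listMapKey=key
	// +kubebuilder:validation:MaxItems=32
	// +optional
	ResourceLabels []GCPResourceLabelSketch `json:"resourceLabels,omitempty"`
}

type GCPResourceLabelSketch struct {
	// +kubebuilder:validation:XValidation:rule="!self.startsWith('openshift-io') && !self.startsWith('kubernetes-io')",message="label keys must not start with either `openshift-io` or `kubernetes-io`"
	// +kubebuilder:validation:Pattern=`^[a-z][0-9a-z_-]+$`
	Key string `json:"key"`

	// +kubebuilder:validation:Pattern=`^[0-9a-z_-]+$`
	Value string `json:"value"`
}
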
Tag value must begin and end with an alphanumeric character, and must contain only uppercase, lowercase alphanumeric characters, and the following special characters `_-.@%=+:,*#&(){}[]` and spaces. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z0-9]([0-9A-Za-z_.@%=+:,*#&()\[\]{}\-\s]{0,61}[a-zA-Z0-9])?$ + type: string + required: + - key + - parentID + - value + type: object + maxItems: 50 + type: array + x-kubernetes-list-map-keys: + - key + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: resourceTags are immutable and may only be configured during installation + rule: self.all(x, x in oldSelf) && oldSelf.all(x, x in self) type: object + x-kubernetes-validations: + - message: resourceLabels may only be configured during installation + rule: '!has(oldSelf.resourceLabels) && !has(self.resourceLabels) || has(oldSelf.resourceLabels) && has(self.resourceLabels)' + - message: resourceTags may only be configured during installation + rule: '!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)' ibmcloud: description: IBMCloud contains settings specific to the IBMCloud infrastructure provider. properties: @@ -637,6 +772,22 @@ spec: type: string maxItems: 2 type: array + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer used by the cluster is configured. + properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer used by the cluster on Nutanix platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object type: object openstack: description: OpenStack contains settings specific to the OpenStack infrastructure provider. @@ -664,6 +815,22 @@ spec: type: string maxItems: 2 type: array + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer used by the cluster is configured. + properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer used by the cluster on OpenStack platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault. 
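The parentID pattern in the GCP resourceTags schema above is worth unpacking: its two alternates accept either a numeric Organization ID (decimal digits, no leading zero, at most 32 digits) or a 6-to-30 character Project ID (lowercase letters, digits, and hyphens, starting with a letter and not ending with a hyphen). A quick check of the alternation with Go's regexp package:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// parentID pattern copied from the GCP resourceTags schema above.
	parentID := regexp.MustCompile(`(^[1-9][0-9]{0,31}$)|(^[a-z][a-z0-9-]{4,28}[a-z0-9]$)`)
	for _, s := range []string{
		"1234567890",     // organization ID: matches
		"my-project-123", // project ID: matches
		"0123",           // leading zero: no match
		"ab",             // shorter than a project ID minimum: no match
	} {
		fmt.Printf("%-16s %v\n", s, parentID.MatchString(s))
	}
}
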
+ enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object nodeDNSIP: description: nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for OpenStack deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster. type: string @@ -691,6 +858,22 @@ spec: type: string maxItems: 2 type: array + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer used by the cluster is configured. + properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer used by the cluster on Ovirt platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object nodeDNSIP: description: 'deprecated: as of 4.6, this field is no longer set or honored. It will be removed in a future release.' type: string @@ -707,6 +890,14 @@ spec: region: description: region holds the default Power VS region for new Power VS resources created by the cluster. type: string + resourceGroup: + description: 'resourceGroup is the resource group name for new IBMCloud resources created for a cluster. The resource group specified here will be used by cluster-image-registry-operator to set up a COS Instance in IBMCloud for the cluster registry. More about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs. When omitted, the image registry operator won''t be able to configure storage, which results in the image registry cluster operator not being in an available state.' + maxLength: 40 + pattern: ^[a-zA-Z0-9-_ ]+$ + type: string + x-kubernetes-validations: + - message: resourceGroup is immutable once set + rule: oldSelf == '' || self == oldSelf serviceEndpoints: description: serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service. items: @@ -730,6 +921,9 @@ spec: description: 'zone holds the default zone for the new Power VS resources created by the cluster. Note: Currently only single-zone OCP clusters are supported' type: string type: object + x-kubernetes-validations: + - message: cannot unset resourceGroup once set + rule: '!has(oldSelf.resourceGroup) || has(self.resourceGroup)' type: description: "type is the underlying infrastructure provider for the cluster. This value controls whether infrastructure automation such as service load balancers, dynamic volume provisioning, machine creation and deletion, and other integrations are enabled. 
If None, no infrastructure automation is enabled. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Libvirt\", \"OpenStack\", \"VSphere\", \"oVirt\", \"EquinixMetal\", \"PowerVS\", \"AlibabaCloud\", \"Nutanix\" and \"None\". Individual components may not support all platforms, and must handle unrecognized platforms as None if they do not support that platform. \n This value will be synced with to the `status.platform` and `status.platformStatus.type`. Currently this value cannot be changed once set." enum: @@ -774,6 +968,22 @@ spec: type: string maxItems: 2 type: array + loadBalancer: + default: + type: OpenShiftManagedDefault + description: loadBalancer defines how the load balancer used by the cluster is configured. + properties: + type: + default: OpenShiftManagedDefault + description: type defines the type of load balancer used by the cluster on VSphere platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault. + enum: + - OpenShiftManagedDefault + - UserManaged + type: string + x-kubernetes-validations: + - message: type is immutable once set + rule: oldSelf == '' || self == oldSelf + type: object nodeDNSIP: description: nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for vSphere deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster. 
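The remainder of the patch, below, adds declarative testsuite files (custom.*.testsuite.yaml and stable.*.testsuite.yaml) that exercise these CRD schemas: each onCreate case supplies an initial manifest and either the expected persisted object or an expectedError substring, and each onUpdate case supplies initial and updated manifests plus either the expected result or an expectedStatusError substring. A minimal reader for that format, assuming the gopkg.in/yaml.v2 module this repository already vendors (struct names are illustrative; the real runner lives in openshift/api's test tooling):

package main

import (
	"fmt"
	"os"

	"gopkg.in/yaml.v2"
)

// Field names mirror the keys visible in the suite files below.
type suite struct {
	Name  string `yaml:"name"`
	CRD   string `yaml:"crd"`
	Tests struct {
		OnCreate []struct {
			Name          string `yaml:"name"`
			Initial       string `yaml:"initial"`
			Expected      string `yaml:"expected"`
			ExpectedError string `yaml:"expectedError"`
		} `yaml:"onCreate"`
		OnUpdate []struct {
			Name                string `yaml:"name"`
			Initial             string `yaml:"initial"`
			Updated             string `yaml:"updated"`
			Expected            string `yaml:"expected"`
			ExpectedStatusError string `yaml:"expectedStatusError"`
		} `yaml:"onUpdate"`
	} `yaml:"tests"`
}

func main() {
	raw, err := os.ReadFile("stable.infrastructure.testsuite.yaml")
	if err != nil {
		panic(err)
	}
	var s suite
	if err := yaml.Unmarshal(raw, &s); err != nil {
		panic(err)
	}
	fmt.Printf("%s: %d create cases, %d update cases\n",
		s.Name, len(s.Tests.OnCreate), len(s.Tests.OnUpdate))
}
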
type: string diff --git a/vendor/github.com/openshift/api/config/v1/custom.apiserver.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/custom.apiserver.testsuite.yaml new file mode 100644 index 000000000..5e2dea3ea --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/custom.apiserver.testsuite.yaml @@ -0,0 +1,35 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[CustomNoUpgrade] APIServer" +crd: 0000_10_config-operator_01_apiserver-CustomNoUpgrade.crd.yaml +tests: + onCreate: + - name: Should be able to create encrypt with aescbc + initial: | + apiVersion: config.openshift.io/v1 + kind: APIServer + spec: + encryption: + type: aescbc + expected: | + apiVersion: config.openshift.io/v1 + kind: APIServer + spec: + audit: + profile: Default + encryption: + type: aescbc + - name: Should be able to create encrypt with aesgcm + initial: | + apiVersion: config.openshift.io/v1 + kind: APIServer + spec: + encryption: + type: aesgcm + expected: | + apiVersion: config.openshift.io/v1 + kind: APIServer + spec: + audit: + profile: Default + encryption: + type: aesgcm diff --git a/vendor/github.com/openshift/api/config/v1/custom.dns.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/custom.dns.testsuite.yaml new file mode 100644 index 000000000..ab1a123b6 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/custom.dns.testsuite.yaml @@ -0,0 +1,104 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Custom] DNS" +crd: 0000_10_config-operator_01_dns-CustomNoUpgrade.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal DNS + initial: | + apiVersion: config.openshift.io/v1 + kind: DNS + spec: {} # No spec is required for a DNS + expected: | + apiVersion: config.openshift.io/v1 + kind: DNS + spec: {} + - name: Should be able to specify an AWS role ARN for a private hosted zone + initial: | + apiVersion: config.openshift.io/v1 + kind: DNS + spec: + platform: + type: AWS + aws: + privateZoneIAMRole: arn:aws:iam::123456789012:role/foo + expected: | + apiVersion: config.openshift.io/v1 + kind: DNS + spec: + platform: + type: AWS + aws: + privateZoneIAMRole: arn:aws:iam::123456789012:role/foo + - name: Should not be able to specify unsupported platform + initial: | + apiVersion: config.openshift.io/v1 + kind: DNS + spec: + platform: + type: Azure + azure: + privateZoneIAMRole: arn:aws:iam::123456789012:role/foo + expectedError: "Invalid value: \"string\": allowed values are '' and 'AWS'" + - name: Should not be able to specify invalid AWS role ARN + initial: | + apiVersion: config.openshift.io/v1 + kind: DNS + metadata: + name: cluster + spec: + platform: + type: AWS + aws: + privateZoneIAMRole: arn:aws:iam:bad:123456789012:role/foo + expectedError: "DNS.config.openshift.io \"cluster\" is invalid: spec.platform.aws.privateZoneIAMRole: Invalid value: \"arn:aws:iam:bad:123456789012:role/foo\": spec.platform.aws.privateZoneIAMRole in body should match '^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\\/.*$'" + - name: Should not be able to specify different type and platform + initial: | + apiVersion: config.openshift.io/v1 + kind: DNS + spec: + platform: + type: "" + aws: + privateZoneIAMRole: arn:aws:iam::123456789012:role/foo + expectedError: "Invalid value: \"object\": aws configuration is required when platform is AWS, and forbidden otherwise" + onUpdate: + - name: Can switch from empty (default), to AWS + initial: | + apiVersion: 
config.openshift.io/v1 + kind: DNS + spec: + platform: + type: "" + updated: | + apiVersion: config.openshift.io/v1 + kind: DNS + spec: + platform: + type: AWS + aws: + privateZoneIAMRole: arn:aws:iam::123456789012:role/foo + expected: | + apiVersion: config.openshift.io/v1 + kind: DNS + spec: + platform: + type: AWS + aws: + privateZoneIAMRole: arn:aws:iam::123456789012:role/foo + - name: Upgrade case is valid + initial: | + apiVersion: config.openshift.io/v1 + kind: DNS + spec: {} # No spec is required for a DNS + updated: | + apiVersion: config.openshift.io/v1 + kind: DNS + spec: + platform: + type: "" + expected: | + apiVersion: config.openshift.io/v1 + kind: DNS + spec: + platform: + type: "" diff --git a/vendor/github.com/openshift/api/config/v1/custom.infrastructure.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/custom.infrastructure.testsuite.yaml new file mode 100644 index 000000000..24433f4f7 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/custom.infrastructure.testsuite.yaml @@ -0,0 +1,321 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Custom] Infrastructure" +crd: 0000_10_config-operator_01_infrastructure-CustomNoUpgrade.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal Infrastructure + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} # No spec is required for a Infrastructure + expected: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + onUpdate: + - name: Should not be able to modify an existing GCP ResourceLabels Label + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + controlPlaneTopology: "HighlyAvailable" + infrastructureTopology: "HighlyAvailable" + platform: GCP + platformStatus: + type: GCP + gcp: + resourceLabels: + - {key: "key", value: "value"} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: + resourceLabels: + - {key: "key", value: "changed"} + expectedStatusError: "status.platformStatus.gcp.resourceLabels: Invalid value: \"array\": resourceLabels are immutable and may only be configured during installation" + - name: Should not be able to add a Label to an existing GCP ResourceLabels + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + controlPlaneTopology: "HighlyAvailable" + infrastructureTopology: "HighlyAvailable" + platform: GCP + platformStatus: + type: GCP + gcp: + resourceLabels: + - {key: "key", value: "value"} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: + resourceLabels: + - {key: "key", value: "value"} + - {key: "new", value: "entry"} + expectedStatusError: "status.platformStatus.gcp.resourceLabels: Invalid value: \"array\": resourceLabels are immutable and may only be configured during installation" + - name: Should not be able to remove a Label from an existing GCP ResourceLabels + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: + resourceLabels: + - {key: "key", value: "value"} + - {key: "new", value: "entry"} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: + resourceLabels: + - {key: "key", value: 
"value"} + expectedStatusError: "status.platformStatus.gcp.resourceLabels: Invalid value: \"array\": resourceLabels are immutable and may only be configured during installation" + - name: Should not be able to add GCP ResourceLabels to an empty platformStatus.gcp + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: {} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + gcp: + resourceLabels: + - {key: "key", value: "value"} + expectedStatusError: "status.platformStatus.gcp: Invalid value: \"object\": resourceLabels may only be configured during installation" + - name: Should not be able to remove GCP ResourceLabels from platformStatus.gcp + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: + resourceLabels: + - {key: "key", value: "value"} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: {} + expectedStatusError: "status.platformStatus.gcp: Invalid value: \"object\": resourceLabels may only be configured during installation" + - name: Should not have label key start with openshift-io for GCP ResourceLabels in platformStatus.gcp + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: {} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: + resourceLabels: + - {key: "key", value: "value"} + - {key: "openshift-io-created-cluster", value: "true"} + expectedStatusError: "status.platformStatus.gcp.resourceLabels[1].key: Invalid value: \"string\": label keys must not start with either `openshift-io` or `kubernetes-io`" + - name: Should not have label key start with kubernetes-io for GCP ResourceLabels in platformStatus.gcp + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: {} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: + resourceLabels: + - {key: "key", value: "value"} + - {key: "kubernetes-io-created-cluster", value: "true"} + expectedStatusError: "status.platformStatus.gcp.resourceLabels[1].key: Invalid value: \"string\": label keys must not start with either `openshift-io` or `kubernetes-io`" + - name: Should not be able to modify an existing GCP ResourceTags Tag + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + controlPlaneTopology: "HighlyAvailable" + infrastructureTopology: "HighlyAvailable" + platform: GCP + platformStatus: + type: GCP + gcp: + resourceTags: + - {parentID: "1234567890", key: "key", value: "value"} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: + resourceTags: + - {parentID: "1234567890", key: "key", value: "changed"} + expectedStatusError: "status.platformStatus.gcp.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation" + - name: Should not be able to add a Tag to an existing GCP ResourceTags + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + controlPlaneTopology: "HighlyAvailable" + 
infrastructureTopology: "HighlyAvailable" + platform: GCP + platformStatus: + type: GCP + gcp: + resourceTags: + - {parentID: "1234567890", key: "key", value: "value"} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: + resourceTags: + - {parentID: "1234567890", key: "key", value: "value"} + - {parentID: "test-project-123", key: "new", value: "tag"} + expectedStatusError: "status.platformStatus.gcp.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation" + - name: Should not be able to remove a Tag from an existing GCP ResourceTags + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: + resourceTags: + - {parentID: "1234567890", key: "key1", value: "value1"} + - {parentID: "test-project-123", key: "key2", value: "value2"} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: + resourceTags: + - {parentID: "1234567890", key: "key1", value: "value1"} + expectedStatusError: "status.platformStatus.gcp.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation" + - name: Should not be able to add GCP ResourceTags to an empty platformStatus.gcp + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: {} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + gcp: + resourceTags: + - {parentID: "1234567890", key: "key", value: "value"} + expectedStatusError: "status.platformStatus.gcp: Invalid value: \"object\": resourceTags may only be configured during installation" + - name: Should not be able to remove GCP ResourceTags from platformStatus.gcp + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: + resourceTags: + - {parentID: "1234567890", key: "key", value: "value"} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: {} + expectedStatusError: "status.platformStatus.gcp: Invalid value: \"object\": resourceTags may only be configured during installation" + - name: Should not be able to modify ParentID of a Tag in the GCP ResourceTags + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + controlPlaneTopology: "HighlyAvailable" + infrastructureTopology: "HighlyAvailable" + platform: GCP + platformStatus: + type: GCP + gcp: + resourceTags: + - {parentID: "1234567890", key: "key", value: "value"} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: + resourceTags: + - {parentID: "test-project-123", key: "key", value: "value"} + expectedStatusError: "status.platformStatus.gcp.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation" diff --git a/vendor/github.com/openshift/api/config/v1/feature_gates.go b/vendor/github.com/openshift/api/config/v1/feature_gates.go new file mode 100644 index 000000000..364b1df93 --- /dev/null +++ 
b/vendor/github.com/openshift/api/config/v1/feature_gates.go @@ -0,0 +1,304 @@ +package v1 + +// FeatureGateDescription is a golang-only interface used to contains details for a feature gate. +type FeatureGateDescription struct { + // FeatureGateAttributes is the information that appears in the API + FeatureGateAttributes FeatureGateAttributes + + // OwningJiraComponent is the jira component that owns most of the impl and first assignment for the bug. + // This is the team that owns the feature long term. + OwningJiraComponent string + // ResponsiblePerson is the person who is on the hook for first contact. This is often, but not always, a team lead. + // It is someone who can make the promise on the behalf of the team. + ResponsiblePerson string + // OwningProduct is the product that owns the lifecycle of the gate. + OwningProduct OwningProduct +} + +type OwningProduct string + +var ( + ocpSpecific = OwningProduct("OCP") + kubernetes = OwningProduct("Kubernetes") +) + +var ( + FeatureGateGatewayAPI = FeatureGateName("GatewayAPI") + gateGatewayAPI = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateGatewayAPI, + }, + OwningJiraComponent: "Routing", + ResponsiblePerson: "miciah", + OwningProduct: ocpSpecific, + } + + FeatureGateOpenShiftPodSecurityAdmission = FeatureGateName("OpenShiftPodSecurityAdmission") + openShiftPodSecurityAdmission = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateOpenShiftPodSecurityAdmission, + }, + OwningJiraComponent: "auth", + ResponsiblePerson: "stlaz", + OwningProduct: ocpSpecific, + } + + FeatureGateRetroactiveDefaultStorageClass = FeatureGateName("RetroactiveDefaultStorageClass") + retroactiveDefaultStorageClass = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateRetroactiveDefaultStorageClass, + }, + OwningJiraComponent: "storage", + ResponsiblePerson: "RomanBednar", + OwningProduct: kubernetes, + } + + FeatureGateExternalCloudProvider = FeatureGateName("ExternalCloudProvider") + externalCloudProvider = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateExternalCloudProvider, + }, + OwningJiraComponent: "cloud-provider", + ResponsiblePerson: "jspeed", + OwningProduct: ocpSpecific, + } + + FeatureGateExternalCloudProviderAzure = FeatureGateName("ExternalCloudProviderAzure") + externalCloudProviderAzure = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateExternalCloudProviderAzure, + }, + OwningJiraComponent: "cloud-provider", + ResponsiblePerson: "jspeed", + OwningProduct: ocpSpecific, + } + + FeatureGateExternalCloudProviderGCP = FeatureGateName("ExternalCloudProviderGCP") + externalCloudProviderGCP = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateExternalCloudProviderGCP, + }, + OwningJiraComponent: "cloud-provider", + ResponsiblePerson: "jspeed", + OwningProduct: ocpSpecific, + } + + FeatureGateExternalCloudProviderExternal = FeatureGateName("ExternalCloudProviderExternal") + externalCloudProviderExternal = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateExternalCloudProviderExternal, + }, + OwningJiraComponent: "cloud-provider", + ResponsiblePerson: "elmiko", + OwningProduct: ocpSpecific, + } + + FeatureGateCSIDriverSharedResource = FeatureGateName("CSIDriverSharedResource") + csiDriverSharedResource = FeatureGateDescription{ + FeatureGateAttributes: 
FeatureGateAttributes{ + Name: FeatureGateCSIDriverSharedResource, + }, + OwningJiraComponent: "builds", + ResponsiblePerson: "adkaplan", + OwningProduct: ocpSpecific, + } + + FeatureGateBuildCSIVolumes = FeatureGateName("BuildCSIVolumes") + buildCSIVolumes = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateBuildCSIVolumes, + }, + OwningJiraComponent: "builds", + ResponsiblePerson: "adkaplan", + OwningProduct: ocpSpecific, + } + + FeatureGateNodeSwap = FeatureGateName("NodeSwap") + nodeSwap = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateNodeSwap, + }, + OwningJiraComponent: "node", + ResponsiblePerson: "ehashman", + OwningProduct: kubernetes, + } + + FeatureGateMachineAPIProviderOpenStack = FeatureGateName("MachineAPIProviderOpenStack") + machineAPIProviderOpenStack = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateMachineAPIProviderOpenStack, + }, + OwningJiraComponent: "openstack", + ResponsiblePerson: "egarcia", + OwningProduct: ocpSpecific, + } + + FeatureGateInsightsConfigAPI = FeatureGateName("InsightsConfigAPI") + insightsConfigAPI = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateInsightsConfigAPI, + }, + OwningJiraComponent: "insights", + ResponsiblePerson: "tremes", + OwningProduct: ocpSpecific, + } + + FeatureGatePDBUnhealthyPodEvictionPolicy = FeatureGateName("PDBUnhealthyPodEvictionPolicy") + pdbUnhealthyPodEvictionPolicy = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGatePDBUnhealthyPodEvictionPolicy, + }, + OwningJiraComponent: "apps", + ResponsiblePerson: "atiratree", + OwningProduct: kubernetes, + } + + FeatureGateDynamicResourceAllocation = FeatureGateName("DynamicResourceAllocation") + dynamicResourceAllocation = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateDynamicResourceAllocation, + }, + OwningJiraComponent: "scheduling", + ResponsiblePerson: "jchaloup", + OwningProduct: kubernetes, + } + + FeatureGateAdmissionWebhookMatchConditions = FeatureGateName("AdmissionWebhookMatchConditions") + admissionWebhookMatchConditions = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateAdmissionWebhookMatchConditions, + }, + OwningJiraComponent: "kube-apiserver", + ResponsiblePerson: "benluddy", + OwningProduct: kubernetes, + } + + FeatureGateAzureWorkloadIdentity = FeatureGateName("AzureWorkloadIdentity") + azureWorkloadIdentity = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateAzureWorkloadIdentity, + }, + OwningJiraComponent: "cloud-credential-operator", + ResponsiblePerson: "abutcher", + OwningProduct: ocpSpecific, + } + + FeatureGateMaxUnavailableStatefulSet = FeatureGateName("MaxUnavailableStatefulSet") + maxUnavailableStatefulSet = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateMaxUnavailableStatefulSet, + }, + OwningJiraComponent: "apps", + ResponsiblePerson: "atiratree", + OwningProduct: kubernetes, + } + + FeatureGateEventedPLEG = FeatureGateName("EventedPLEG") + eventedPleg = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateEventedPLEG, + }, + OwningJiraComponent: "node", + ResponsiblePerson: "sairameshv", + OwningProduct: kubernetes, + } + + FeatureGatePrivateHostedZoneAWS = FeatureGateName("PrivateHostedZoneAWS") + privateHostedZoneAWS = 
FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGatePrivateHostedZoneAWS, + }, + OwningJiraComponent: "Routing", + ResponsiblePerson: "miciah", + OwningProduct: ocpSpecific, + } + + FeatureGateSigstoreImageVerification = FeatureGateName("SigstoreImageVerification") + sigstoreImageVerification = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateSigstoreImageVerification, + }, + OwningJiraComponent: "node", + ResponsiblePerson: "sgrunert", + OwningProduct: ocpSpecific, + } + + FeatureGateGCPLabelsTags = FeatureGateName("GCPLabelsTags") + gcpLabelsTags = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateGCPLabelsTags, + }, + OwningJiraComponent: "Installer", + ResponsiblePerson: "bhb", + OwningProduct: ocpSpecific, + } + + FeatureGateAlibabaPlatform = FeatureGateName("AlibabaPlatform") + alibabaPlatform = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateAlibabaPlatform, + }, + OwningJiraComponent: "cloud-provider", + ResponsiblePerson: "jspeed", + OwningProduct: ocpSpecific, + } + + FeatureGateCloudDualStackNodeIPs = FeatureGateName("CloudDualStackNodeIPs") + cloudDualStackNodeIPs = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateCloudDualStackNodeIPs, + }, + OwningJiraComponent: "machine-config-operator/platform-baremetal", + ResponsiblePerson: "mkowalsk", + OwningProduct: kubernetes, + } + FeatureGateVSphereStaticIPs = FeatureGateName("VSphereStaticIPs") + vSphereStaticIPs = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateVSphereStaticIPs, + }, + OwningJiraComponent: "splat", + ResponsiblePerson: "rvanderp3", + OwningProduct: ocpSpecific, + } + + FeatureGateRouteExternalCertificate = FeatureGateName("RouteExternalCertificate") + routeExternalCertificate = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateRouteExternalCertificate, + }, + OwningJiraComponent: "router", + ResponsiblePerson: "thejasn", + OwningProduct: ocpSpecific, + } + + FeatureGateAdminNetworkPolicy = FeatureGateName("AdminNetworkPolicy") + adminNetworkPolicy = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateAdminNetworkPolicy, + }, + OwningJiraComponent: "Networking/ovn-kubernetes", + ResponsiblePerson: "tssurya", + OwningProduct: ocpSpecific, + } + + FeatureGateAutomatedEtcdBackup = FeatureGateName("AutomatedEtcdBackup") + automatedEtcdBackup = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateAutomatedEtcdBackup, + }, + OwningJiraComponent: "etcd", + ResponsiblePerson: "hasbro17", + OwningProduct: ocpSpecific, + } + + FeatureGateMachineAPIOperatorDisableMachineHealthCheckController = FeatureGateName("MachineAPIOperatorDisableMachineHealthCheckController") + machineAPIOperatorDisableMachineHealthCheckController = FeatureGateDescription{ + FeatureGateAttributes: FeatureGateAttributes{ + Name: FeatureGateMachineAPIOperatorDisableMachineHealthCheckController, + }, + OwningJiraComponent: "ecoproject", + ResponsiblePerson: "msluiter", + OwningProduct: ocpSpecific, + } +) diff --git a/vendor/github.com/openshift/api/config/v1/stable.apiserver.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.apiserver.testsuite.yaml index 5c28143d5..75f846a3d 100644 --- a/vendor/github.com/openshift/api/config/v1/stable.apiserver.testsuite.yaml +++ 
b/vendor/github.com/openshift/api/config/v1/stable.apiserver.testsuite.yaml @@ -1,16 +1,36 @@ apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this name: "[Stable] APIServer" -crd: 0000_10_config-operator_01_apiserver.crd.yaml +crd: 0000_10_config-operator_01_apiserver-Default.crd.yaml tests: onCreate: - - name: Should be able to create a minimal ClusterOperator + - name: Should be able to create encrypt with aescbc initial: | apiVersion: config.openshift.io/v1 kind: APIServer - spec: {} # No spec is required for a APIServer + spec: + encryption: + type: aescbc + expected: | + apiVersion: config.openshift.io/v1 + kind: APIServer + spec: + audit: + profile: Default + encryption: + type: aescbc + - name: Should be able to create encrypt with aesgcm + initial: | + apiVersion: config.openshift.io/v1 + kind: APIServer + spec: + encryption: + type: aesgcm expected: | apiVersion: config.openshift.io/v1 kind: APIServer spec: audit: profile: Default + encryption: + type: aesgcm + diff --git a/vendor/github.com/openshift/api/config/v1/stable.dns.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.dns.testsuite.yaml index c69f50050..3054d200e 100644 --- a/vendor/github.com/openshift/api/config/v1/stable.dns.testsuite.yaml +++ b/vendor/github.com/openshift/api/config/v1/stable.dns.testsuite.yaml @@ -1,6 +1,6 @@ apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this name: "[Stable] DNS" -crd: 0000_10_config-operator_01_dns.crd.yaml +crd: 0000_10_config-operator_01_dns-Default.crd.yaml tests: onCreate: - name: Should be able to create a minimal DNS @@ -12,3 +12,94 @@ tests: apiVersion: config.openshift.io/v1 kind: DNS spec: {} + - name: Should be able to specify an AWS role ARN for a private hosted zone + initial: | + apiVersion: config.openshift.io/v1 + kind: DNS + spec: + platform: + type: AWS + aws: + privateZoneIAMRole: arn:aws:iam::123456789012:role/foo + expected: | + apiVersion: config.openshift.io/v1 + kind: DNS + spec: + platform: + type: AWS + aws: + privateZoneIAMRole: arn:aws:iam::123456789012:role/foo + - name: Should not be able to specify unsupported platform + initial: | + apiVersion: config.openshift.io/v1 + kind: DNS + spec: + platform: + type: Azure + azure: + privateZoneIAMRole: arn:aws:iam::123456789012:role/foo + expectedError: "Invalid value: \"string\": allowed values are '' and 'AWS'" + - name: Should not be able to specify invalid AWS role ARN + initial: | + apiVersion: config.openshift.io/v1 + kind: DNS + metadata: + name: cluster + spec: + platform: + type: AWS + aws: + privateZoneIAMRole: arn:aws:iam:bad:123456789012:role/foo + expectedError: "DNS.config.openshift.io \"cluster\" is invalid: spec.platform.aws.privateZoneIAMRole: Invalid value: \"arn:aws:iam:bad:123456789012:role/foo\": spec.platform.aws.privateZoneIAMRole in body should match '^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\\/.*$'" + - name: Should not be able to specify different type and platform + initial: | + apiVersion: config.openshift.io/v1 + kind: DNS + spec: + platform: + type: "" + aws: + privateZoneIAMRole: arn:aws:iam::123456789012:role/foo + expectedError: "Invalid value: \"object\": aws configuration is required when platform is AWS, and forbidden otherwise" + onUpdate: + - name: Can switch from empty (default), to AWS + initial: | + apiVersion: config.openshift.io/v1 + kind: DNS + spec: + platform: + type: "" + updated: | + apiVersion: config.openshift.io/v1 + kind: DNS + spec: + platform: + 
type: AWS + aws: + privateZoneIAMRole: arn:aws:iam::123456789012:role/foo + expected: | + apiVersion: config.openshift.io/v1 + kind: DNS + spec: + platform: + type: AWS + aws: + privateZoneIAMRole: arn:aws:iam::123456789012:role/foo + - name: Upgrade case is valid + initial: | + apiVersion: config.openshift.io/v1 + kind: DNS + spec: {} # No spec is required for a DNS + updated: | + apiVersion: config.openshift.io/v1 + kind: DNS + spec: + platform: + type: "" + expected: | + apiVersion: config.openshift.io/v1 + kind: DNS + spec: + platform: + type: "" + diff --git a/vendor/github.com/openshift/api/config/v1/stable.infrastructure.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/stable.infrastructure.testsuite.yaml index bbafe4c47..63da9aa41 100644 --- a/vendor/github.com/openshift/api/config/v1/stable.infrastructure.testsuite.yaml +++ b/vendor/github.com/openshift/api/config/v1/stable.infrastructure.testsuite.yaml @@ -56,3 +56,910 @@ tests: external: platformName: SomeOtherCoolplatformName expectedError: " spec.platformSpec.external.platformName: Invalid value: \"string\": platform name cannot be changed once set" + - name: Should not be able to modify an existing Azure ResourceTags Tag + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + controlPlaneTopology: "HighlyAvailable" + infrastructureTopology: "HighlyAvailable" + platform: Azure + platformStatus: + type: Azure + azure: + resourceTags: + - {key: "key", value: "value"} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: Azure + platformStatus: + type: Azure + azure: + resourceTags: + - {key: "key", value: "changed"} + expectedStatusError: "status.platformStatus.azure.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation" + - name: Should not be able to add a Tag to an existing Azure ResourceTags + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + controlPlaneTopology: "HighlyAvailable" + infrastructureTopology: "HighlyAvailable" + platform: Azure + platformStatus: + type: Azure + azure: + resourceTags: + - {key: "key", value: "value"} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: Azure + platformStatus: + type: Azure + azure: + resourceTags: + - {key: "key", value: "value"} + - {key: "new", value: "entry"} + expectedStatusError: "status.platformStatus.azure.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation" + - name: Should not be able to remove a Tag from an existing Azure ResourceTags + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: Azure + platformStatus: + type: Azure + azure: + resourceTags: + - {key: "key", value: "value"} + - {key: "new", value: "entry"} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: Azure + platformStatus: + type: Azure + azure: + resourceTags: + - {key: "key", value: "value"} + expectedStatusError: "status.platformStatus.azure.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation" + - name: Should not be able to add Azure ResourceTags to an empty platformStatus.azure + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: Azure + platformStatus: + type: 
Azure + azure: {} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: Azure + platformStatus: + azure: + resourceTags: + - {key: "key", value: "value"} + expectedStatusError: "status.platformStatus.azure: Invalid value: \"object\": resourceTags may only be configured during installation" + - name: Should not be able to remove Azure ResourceTags from platformStatus.azure + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: Azure + platformStatus: + type: Azure + azure: + resourceTags: + - {key: "key", value: "value"} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: Azure + platformStatus: + type: Azure + azure: {} + expectedStatusError: "status.platformStatus.azure: Invalid value: \"object\": resourceTags may only be configured during installation" + - name: Should be able to modify the ResourceGroupName while Azure ResourceTags are present + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: Azure + platformStatus: + type: Azure + azure: + resourceGroupName: foo + resourceTags: + - {key: "key", value: "value"} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: Azure + platformStatus: + azure: + resourceGroupName: bar + resourceTags: + - {key: "key", value: "value"} + expected: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + controlPlaneTopology: "HighlyAvailable" + infrastructureTopology: "HighlyAvailable" + platform: Azure + platformStatus: + azure: + resourceGroupName: bar + resourceTags: + - {key: "key", value: "value"} + - name: PowerVS platform status's resourceGroup length should not exceed the max length set + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + type: PowerVS + status: + platform: PowerVS + platformStatus: + powervs: + resourceGroup: resource-group + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + type: PowerVS + status: + platform: PowerVS + platformStatus: + powervs: + resourceGroup: resource-group-should-not-accept-the-string-that-exceeds-max-length-set + expectedStatusError: "status.platformStatus.powervs.resourceGroup: Too long: may not be longer than 40" + - name: PowerVS platform status's resourceGroup should match the regex configured + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + type: PowerVS + status: + platform: PowerVS + platformStatus: + powervs: + resourceGroup: resource-group + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + type: PowerVS + status: + platform: PowerVS + platformStatus: + powervs: + resourceGroup: re$ource-group + expectedStatusError: "status.platformStatus.powervs.resourceGroup in body should match '^[a-zA-Z0-9-_ ]+$'" + - name: Should not be able to change PowerVS platform status's resourceGroup once it was set + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + type: PowerVS + status: + platform: PowerVS + platformStatus: + powervs: + resourceGroup: resource-group + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + type: PowerVS + status: + platform: PowerVS + platformStatus: + powervs: + resourceGroup: other-resource-group-name + 
expectedStatusError: "status.platformStatus.powervs.resourceGroup: Invalid value: \"string\": resourceGroup is immutable once set" + - name: Should not be able to unset PowerVS platform status's resourceGroup once it was set + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + type: PowerVS + status: + platform: PowerVS + platformStatus: + powervs: + region: some-region + resourceGroup: resource-group + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + type: PowerVS + status: + platform: PowerVS + platformStatus: + powervs: + region: some-region + expectedStatusError: "status.platformStatus.powervs: Invalid value: \"object\": cannot unset resourceGroup once set" + - name: Should set load balancer type to OpenShiftManagedDefault if not specified + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + openstack: {} + type: OpenStack + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + openstack: {} + type: OpenStack + status: + platform: OpenStack + platformStatus: + openstack: {} + type: OpenStack + expected: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + openstack: {} + type: OpenStack + status: + controlPlaneTopology: HighlyAvailable + infrastructureTopology: HighlyAvailable + platform: OpenStack + platformStatus: + openstack: + loadBalancer: + type: OpenShiftManagedDefault + type: OpenStack + - name: Should be able to override the default load balancer with a valid value + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + openstack: {} + type: OpenStack + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + openstack: {} + type: OpenStack + status: + platform: OpenStack + platformStatus: + openstack: + loadBalancer: + type: UserManaged + type: OpenStack + expected: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + openstack: {} + type: OpenStack + status: + controlPlaneTopology: HighlyAvailable + infrastructureTopology: HighlyAvailable + platform: OpenStack + platformStatus: + openstack: + loadBalancer: + type: UserManaged + type: OpenStack + - name: Should not allow changing the immutable load balancer type field + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + openstack: {} + type: OpenStack + status: + controlPlaneTopology: HighlyAvailable + infrastructureTopology: HighlyAvailable + platform: OpenStack + platformStatus: + openstack: + loadBalancer: + type: OpenShiftManagedDefault + type: OpenStack + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + type: OpenStack + openstack: {} + status: + controlPlaneTopology: HighlyAvailable + infrastructureTopology: HighlyAvailable + platform: OpenStack + platformStatus: + openstack: + loadBalancer: + type: UserManaged + type: OpenStack + expectedStatusError: "status.platformStatus.openstack.loadBalancer.type: Invalid value: \"string\": type is immutable once set" + - name: Should not allow removing the immutable load balancer type field that was initially set + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + openstack: {} + type: OpenStack + status: + controlPlaneTopology: HighlyAvailable + infrastructureTopology: HighlyAvailable + platform: OpenStack + 
platformStatus: + openstack: + loadBalancer: + type: UserManaged + type: OpenStack + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + type: OpenStack + openstack: {} + status: + controlPlaneTopology: HighlyAvailable + infrastructureTopology: HighlyAvailable + platform: OpenStack + platformStatus: + openstack: {} + type: OpenStack + expectedStatusError: "status.platformStatus.openstack.loadBalancer.type: Invalid value: \"string\": type is immutable once set" + - name: Should not allow setting the load balancer type to a wrong value + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + openstack: {} + type: OpenStack + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + openstack: {} + type: OpenStack + status: + platform: OpenStack + platformStatus: + openstack: + loadBalancer: + type: FooBar + type: OpenStack + expectedStatusError: "status.platformStatus.openstack.loadBalancer.type: Unsupported value: \"FooBar\": supported values: \"OpenShiftManagedDefault\", \"UserManaged\"" + - name: Should not be able to update cloudControllerManager state to empty string when state is already set to None + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: None + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platformStatus: + external: + cloudControllerManager: + state: "" + expectedStatusError: " status.platformStatus.external.cloudControllerManager.state: Invalid value: \"string\": state is immutable once set" + - name: Should not be able to update cloudControllerManager state to External when state is already set to None + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: None + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: External + expectedStatusError: " status.platformStatus.external.cloudControllerManager.state: Invalid value: \"string\": state is immutable once set" + - name: Should be able to update cloudControllerManager state to None when state is already set to None + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: None + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: None + expected: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + controlPlaneTopology: HighlyAvailable + infrastructureTopology: HighlyAvailable + platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: None + - name: Should not be able to unset cloudControllerManager state when state is already set to None + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: None + updated: | + apiVersion: 
config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: {} + expectedStatusError: " status.platformStatus.external.cloudControllerManager: Invalid value: \"object\": state may not be added or removed once set" + - name: Should not be able to update cloudControllerManager state to empty string when state is already set to External + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: External + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: "" + expectedStatusError: " status.platformStatus.external.cloudControllerManager.state: Invalid value: \"string\": state is immutable once set" + - name: Should not be able to update cloudControllerManager state to None when state is already set to External + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: External + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: None + expectedStatusError: " status.platformStatus.external.cloudControllerManager.state: Invalid value: \"string\": state is immutable once set" + - name: Should be able to update cloudControllerManager state to External when state is already set to External + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: External + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: External + expected: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + controlPlaneTopology: HighlyAvailable + infrastructureTopology: HighlyAvailable + platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: External + - name: Should not be able to unset cloudControllerManager state when state is already set to External + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: External + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: {} + expectedStatusError: " status.platformStatus.external.cloudControllerManager: Invalid value: \"object\": state may not be added or removed once set" + - name: Should not be able to update cloudControllerManager state to None when state is already set to empty string + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: "" + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + 
platformStatus: + type: External + external: + cloudControllerManager: + state: None + expectedStatusError: " status.platformStatus.external.cloudControllerManager.state: Invalid value: \"string\": state is immutable once set" + - name: Should not be able to update cloudControllerManager state to External when state is already set to empty string + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: "" + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: External + expectedStatusError: " status.platformStatus.external.cloudControllerManager.state: Invalid value: \"string\": state is immutable once set" + - name: Should be able to update cloudControllerManager state to empty string when state is already set to empty string + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: "" + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: "" + expected: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + controlPlaneTopology: HighlyAvailable + infrastructureTopology: HighlyAvailable + platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: "" + - name: Should not be able to unset cloudControllerManager state when state is already set to empty string + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: "" + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: {} + expectedStatusError: " status.platformStatus.external.cloudControllerManager: Invalid value: \"object\": state may not be added or removed once set" + - name: Should be able to update cloudControllerManager state to None when cloudControllerManager state is unset + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: {} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: None + expected: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + controlPlaneTopology: HighlyAvailable + infrastructureTopology: HighlyAvailable + platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: None + - name: Should be able to update cloudControllerManager state to empty string when cloudControllerManager state is unset + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: {} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: 
+ platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: "" + expected: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + controlPlaneTopology: HighlyAvailable + infrastructureTopology: HighlyAvailable + platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: "" + - name: Should not be able to update cloudControllerManager state to External when cloudControllerManager state is unset + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: {} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: External + expectedStatusError: " status.platformStatus.external.cloudControllerManager: Invalid value: \"object\": state may not be added or removed once set" + - name: Should be able to unset cloudControllerManager state when cloudControllerManager state is unset + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: {} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: {} + expected: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + controlPlaneTopology: HighlyAvailable + infrastructureTopology: HighlyAvailable + platform: External + platformStatus: + type: External + external: + cloudControllerManager: {} + - name: Should not be able to add cloudControllerManager when cloudControllerManager is unset + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: {} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: External + expectedStatusError: " status.platformStatus.external: Invalid value: \"object\": cloudControllerManager may not be added or removed once set" + - name: Should not be able to remove cloudControllerManager when cloudControllerManager is set + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: + cloudControllerManager: + state: External + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: External + platformStatus: + type: External + external: {} + expectedStatusError: " status.platformStatus.external: Invalid value: \"object\": cloudControllerManager may not be added or removed once set" diff --git a/vendor/github.com/openshift/api/config/v1/techpreview.apiserver.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/techpreview.apiserver.testsuite.yaml new file mode 100644 index 000000000..74aa92b47 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/techpreview.apiserver.testsuite.yaml @@ -0,0 +1,35 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[TechPreviewNoUpgrade] APIServer" +crd: 
0000_10_config-operator_01_apiserver-TechPreviewNoUpgrade.crd.yaml +tests: + onCreate: + - name: Should be able to create encrypt with aescbc + initial: | + apiVersion: config.openshift.io/v1 + kind: APIServer + spec: + encryption: + type: aescbc + expected: | + apiVersion: config.openshift.io/v1 + kind: APIServer + spec: + audit: + profile: Default + encryption: + type: aescbc + - name: Should be able to create encrypt with aesgcm + initial: | + apiVersion: config.openshift.io/v1 + kind: APIServer + spec: + encryption: + type: aesgcm + expected: | + apiVersion: config.openshift.io/v1 + kind: APIServer + spec: + audit: + profile: Default + encryption: + type: aesgcm diff --git a/vendor/github.com/openshift/api/config/v1/techpreview.dns.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/techpreview.dns.testsuite.yaml new file mode 100644 index 000000000..ec64352e3 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1/techpreview.dns.testsuite.yaml @@ -0,0 +1,14 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[TechPreview] DNS" +crd: 0000_10_config-operator_01_dns-TechPreviewNoUpgrade.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal DNS + initial: | + apiVersion: config.openshift.io/v1 + kind: DNS + spec: {} # No spec is required for a DNS + expected: | + apiVersion: config.openshift.io/v1 + kind: DNS + spec: {} diff --git a/vendor/github.com/openshift/api/config/v1/techpreview.infrastructure.testsuite.yaml b/vendor/github.com/openshift/api/config/v1/techpreview.infrastructure.testsuite.yaml index 23580beea..7834e1f84 100644 --- a/vendor/github.com/openshift/api/config/v1/techpreview.infrastructure.testsuite.yaml +++ b/vendor/github.com/openshift/api/config/v1/techpreview.infrastructure.testsuite.yaml @@ -12,3 +12,508 @@ tests: apiVersion: config.openshift.io/v1 kind: Infrastructure spec: {} + onUpdate: + - name: Status Should contain default fields + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: {} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: {} + expected: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + cpuPartitioning: None + infrastructureTopology: HighlyAvailable + controlPlaneTopology: HighlyAvailable + - name: Status update cpuPartitioning should fail validation check + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + cpuPartitioning: None + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + cpuPartitioning: "Invalid" + expectedStatusError: 'status.cpuPartitioning: Unsupported value: "Invalid": supported values: "None", "AllNodes"' + - name: Should set load balancer type to OpenShiftManagedDefault if not specified + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + baremetal: {} + type: BareMetal + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + baremetal: {} + type: BareMetal + status: + platform: BareMetal + platformStatus: + baremetal: {} + type: BareMetal + expected: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + baremetal: {} + type: BareMetal + status: + controlPlaneTopology: HighlyAvailable + cpuPartitioning: None + infrastructureTopology: HighlyAvailable + platform: BareMetal + platformStatus: + baremetal: + loadBalancer: + 
type: OpenShiftManagedDefault + type: BareMetal + - name: Should be able to override the default load balancer with a valid value + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + baremetal: {} + type: BareMetal + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + baremetal: {} + type: BareMetal + status: + platform: BareMetal + platformStatus: + baremetal: + loadBalancer: + type: UserManaged + type: BareMetal + expected: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + baremetal: {} + type: BareMetal + status: + controlPlaneTopology: HighlyAvailable + cpuPartitioning: None + infrastructureTopology: HighlyAvailable + platform: BareMetal + platformStatus: + baremetal: + loadBalancer: + type: UserManaged + type: BareMetal + - name: Should not allow changing the immutable load balancer type field + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + baremetal: {} + type: BareMetal + status: + controlPlaneTopology: HighlyAvailable + infrastructureTopology: HighlyAvailable + platform: BareMetal + platformStatus: + baremetal: + loadBalancer: + type: OpenShiftManagedDefault + type: BareMetal + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + type: BareMetal + baremetal: {} + status: + controlPlaneTopology: HighlyAvailable + infrastructureTopology: HighlyAvailable + platform: BareMetal + platformStatus: + baremetal: + loadBalancer: + type: UserManaged + type: BareMetal + expectedStatusError: "status.platformStatus.baremetal.loadBalancer.type: Invalid value: \"string\": type is immutable once set" + - name: Should not allow removing the immutable load balancer type field that was initially set + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + baremetal: {} + type: BareMetal + status: + controlPlaneTopology: HighlyAvailable + infrastructureTopology: HighlyAvailable + platform: BareMetal + platformStatus: + baremetal: + loadBalancer: + type: UserManaged + type: BareMetal + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + type: BareMetal + baremetal: {} + status: + controlPlaneTopology: HighlyAvailable + infrastructureTopology: HighlyAvailable + platform: BareMetal + platformStatus: + baremetal: {} + type: BareMetal + expectedStatusError: "status.platformStatus.baremetal.loadBalancer.type: Invalid value: \"string\": type is immutable once set" + - name: Should not allow setting the load balancer type to a wrong value + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + baremetal: {} + type: BareMetal + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: + platformSpec: + baremetal: {} + type: BareMetal + status: + platform: BareMetal + platformStatus: + baremetal: + loadBalancer: + type: FooBar + type: BareMetal + expectedStatusError: "status.platformStatus.baremetal.loadBalancer.type: Unsupported value: \"FooBar\": supported values: \"OpenShiftManagedDefault\", \"UserManaged\"" + - name: Should not be able to modify an existing GCP ResourceLabels Label + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + controlPlaneTopology: "HighlyAvailable" + infrastructureTopology: "HighlyAvailable" + platform: GCP + platformStatus: + type: GCP + gcp: + resourceLabels: + - {key: 
"key", value: "value"} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: + resourceLabels: + - {key: "key", value: "changed"} + expectedStatusError: "status.platformStatus.gcp.resourceLabels: Invalid value: \"array\": resourceLabels are immutable and may only be configured during installation" + - name: Should not be able to add a Label to an existing GCP ResourceLabels + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + controlPlaneTopology: "HighlyAvailable" + infrastructureTopology: "HighlyAvailable" + platform: GCP + platformStatus: + type: GCP + gcp: + resourceLabels: + - {key: "key", value: "value"} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: + resourceLabels: + - {key: "key", value: "value"} + - {key: "new", value: "entry"} + expectedStatusError: "status.platformStatus.gcp.resourceLabels: Invalid value: \"array\": resourceLabels are immutable and may only be configured during installation" + - name: Should not be able to remove a Label from an existing GCP ResourceLabels + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: + resourceLabels: + - {key: "key", value: "value"} + - {key: "new", value: "entry"} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: + resourceLabels: + - {key: "key", value: "value"} + expectedStatusError: "status.platformStatus.gcp.resourceLabels: Invalid value: \"array\": resourceLabels are immutable and may only be configured during installation" + - name: Should not be able to add GCP ResourceLabels to an empty platformStatus.gcp + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: {} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + gcp: + resourceLabels: + - {key: "key", value: "value"} + expectedStatusError: "status.platformStatus.gcp: Invalid value: \"object\": resourceLabels may only be configured during installation" + - name: Should not be able to remove GCP ResourceLabels from platformStatus.gcp + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: + resourceLabels: + - {key: "key", value: "value"} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: {} + expectedStatusError: "status.platformStatus.gcp: Invalid value: \"object\": resourceLabels may only be configured during installation" + - name: Should not have label key start with openshift-io for GCP ResourceLabels in platformStatus.gcp + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: {} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: + resourceLabels: + - {key: "key", value: "value"} + - {key: "openshift-io-created-cluster", value: "true"} + expectedStatusError: "status.platformStatus.gcp.resourceLabels[1].key: Invalid value: \"string\": label keys must not start with 
either `openshift-io` or `kubernetes-io`" + - name: Should not have label key start with kubernetes-io for GCP ResourceLabels in platformStatus.gcp + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: {} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: + resourceLabels: + - {key: "key", value: "value"} + - {key: "kubernetes-io-created-cluster", value: "true"} + expectedStatusError: "status.platformStatus.gcp.resourceLabels[1].key: Invalid value: \"string\": label keys must not start with either `openshift-io` or `kubernetes-io`" + - name: Should not be able to modify an existing GCP ResourceTags Tag + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + controlPlaneTopology: "HighlyAvailable" + infrastructureTopology: "HighlyAvailable" + platform: GCP + platformStatus: + type: GCP + gcp: + resourceTags: + - {parentID: "1234567890", key: "key", value: "value"} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: + resourceTags: + - {parentID: "1234567890", key: "key", value: "changed"} + expectedStatusError: "status.platformStatus.gcp.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation" + - name: Should not be able to add a Tag to an existing GCP ResourceTags + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + controlPlaneTopology: "HighlyAvailable" + infrastructureTopology: "HighlyAvailable" + platform: GCP + platformStatus: + type: GCP + gcp: + resourceTags: + - {parentID: "1234567890", key: "key", value: "value"} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: + resourceTags: + - {parentID: "1234567890", key: "key", value: "value"} + - {parentID: "test-project-123", key: "new", value: "tag"} + expectedStatusError: "status.platformStatus.gcp.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation" + - name: Should not be able to remove a Tag from an existing GCP ResourceTags + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: + resourceTags: + - {parentID: "1234567890", key: "key1", value: "value1"} + - {parentID: "test-project-123", key: "key2", value: "value2"} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: + resourceTags: + - {parentID: "1234567890", key: "key1", value: "value1"} + expectedStatusError: "status.platformStatus.gcp.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation" + - name: Should not be able to add GCP ResourceTags to an empty platformStatus.gcp + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: {} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + gcp: + resourceTags: + - {parentID: "1234567890", key: "key", value: "value"} + expectedStatusError: "status.platformStatus.gcp: Invalid value: \"object\": resourceTags may 
only be configured during installation" + - name: Should not be able to remove GCP ResourceTags from platformStatus.gcp + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: + resourceTags: + - {parentID: "1234567890", key: "key", value: "value"} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: {} + expectedStatusError: "status.platformStatus.gcp: Invalid value: \"object\": resourceTags may only be configured during installation" + - name: Should not be able to modify ParentID of a Tag in the GCP ResourceTags + initial: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + controlPlaneTopology: "HighlyAvailable" + infrastructureTopology: "HighlyAvailable" + platform: GCP + platformStatus: + type: GCP + gcp: + resourceTags: + - {parentID: "1234567890", key: "key", value: "value"} + updated: | + apiVersion: config.openshift.io/v1 + kind: Infrastructure + spec: {} + status: + platform: GCP + platformStatus: + type: GCP + gcp: + resourceTags: + - {parentID: "test-project-123", key: "key", value: "value"} + expectedStatusError: "status.platformStatus.gcp.resourceTags: Invalid value: \"array\": resourceTags are immutable and may only be configured during installation" diff --git a/vendor/github.com/openshift/api/config/v1/types_apiserver.go b/vendor/github.com/openshift/api/config/v1/types_apiserver.go index 31801aacf..5d18860c3 100644 --- a/vendor/github.com/openshift/api/config/v1/types_apiserver.go +++ b/vendor/github.com/openshift/api/config/v1/types_apiserver.go @@ -15,7 +15,10 @@ import ( // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type APIServer struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration // +kubebuilder:validation:Required @@ -184,7 +187,7 @@ type APIServerEncryption struct { Type EncryptionType `json:"type,omitempty"` } -// +kubebuilder:validation:Enum="";identity;aescbc +// +kubebuilder:validation:Enum="";identity;aescbc;aesgcm type EncryptionType string const ( @@ -195,6 +198,10 @@ const ( // aescbc refers to a type where AES-CBC with PKCS#7 padding and a 32-byte key // is used to perform encryption at the datastore layer. EncryptionTypeAESCBC EncryptionType = "aescbc" + + // aesgcm refers to a type where AES-GCM with random nonce and a 32-byte key + // is used to perform encryption at the datastore layer. + EncryptionTypeAESGCM EncryptionType = "aesgcm" ) type APIServerStatus struct { @@ -206,6 +213,9 @@ type APIServerStatus struct { // +openshift:compatibility-gen:level=1 type APIServerList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` Items []APIServer `json:"items"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_authentication.go b/vendor/github.com/openshift/api/config/v1/types_authentication.go index f00baa163..dd2ef6e0a 100644 --- a/vendor/github.com/openshift/api/config/v1/types_authentication.go +++ b/vendor/github.com/openshift/api/config/v1/types_authentication.go @@ -12,7 +12,10 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type Authentication struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration @@ -94,6 +97,9 @@ type AuthenticationStatus struct { // +openshift:compatibility-gen:level=1 type AuthenticationList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` Items []Authentication `json:"items"` diff --git a/vendor/github.com/openshift/api/config/v1/types_build.go b/vendor/github.com/openshift/api/config/v1/types_build.go index 34f46a1f9..e9aef0375 100644 --- a/vendor/github.com/openshift/api/config/v1/types_build.go +++ b/vendor/github.com/openshift/api/config/v1/types_build.go @@ -17,7 +17,10 @@ import ( // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type Build struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // Spec holds user-settable values for the build controller configuration @@ -115,6 +118,9 @@ type BuildOverrides struct { // +openshift:compatibility-gen:level=1 type BuildList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` Items []Build `json:"items"` diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go index 7ce85f811..78666bb1e 100644 --- a/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_operator.go @@ -16,7 +16,10 @@ import ( // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type ClusterOperator struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` // spec holds configuration that could apply to any operator. @@ -204,6 +207,9 @@ const ( // +openshift:compatibility-gen:level=1 type ClusterOperatorList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` Items []ClusterOperator `json:"items"` diff --git a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go index 195313eee..888a9658a 100644 --- a/vendor/github.com/openshift/api/config/v1/types_cluster_version.go +++ b/vendor/github.com/openshift/api/config/v1/types_cluster_version.go @@ -14,7 +14,10 @@ import ( // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type ClusterVersion struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // spec is the desired state of the cluster version - the operator will work @@ -244,7 +247,7 @@ const ( ) // ClusterVersionCapability enumerates optional, core cluster components. -// +kubebuilder:validation:Enum=openshift-samples;baremetal;marketplace;Console;Insights;Storage;CSISnapshot +// +kubebuilder:validation:Enum=openshift-samples;baremetal;marketplace;Console;Insights;Storage;CSISnapshot;NodeTuning;MachineAPI;Build;DeploymentConfig type ClusterVersionCapability string const ( @@ -287,6 +290,46 @@ const ( // VolumeSnapshot CRD objects and manages the creation and deletion // lifecycle of volume snapshots ClusterVersionCapabilityCSISnapshot ClusterVersionCapability = "CSISnapshot" + + // ClusterVersionCapabilityNodeTuning manages the Node Tuning Operator + // which is responsible for watching the Tuned and Profile CRD + // objects and manages the containerized TuneD daemon which controls + // system level tuning of Nodes + ClusterVersionCapabilityNodeTuning ClusterVersionCapability = "NodeTuning" + + // ClusterVersionCapabilityMachineAPI manages + // machine-api-operator + // cluster-autoscaler-operator + // cluster-control-plane-machine-set-operator + // which is responsible for machines configuration and heavily + // targeted for SNO clusters. + // + // The following CRDs are disabled as well + // machines + // machineset + // controlplanemachineset + // + // WARNING: Do not disable that capability without reading + // documentation. This is important part of openshift system + // and may cause cluster damage + ClusterVersionCapabilityMachineAPI ClusterVersionCapability = "MachineAPI" + + // ClusterVersionCapabilityBuild manages the Build API which is responsible + // for watching the Build API objects and managing their lifecycle. + // The functionality is located under openshift-apiserver and openshift-controller-manager. 
+ // + // The following resources are taken into account: + // - builds + // - buildconfigs + ClusterVersionCapabilityBuild ClusterVersionCapability = "Build" + + // ClusterVersionCapabilityDeploymentConfig manages the DeploymentConfig API + // which is responsible for watching the DeploymentConfig API and managing their lifecycle. + // The functionality is located under openshift-apiserver and openshift-controller-manager. + // + // The following resources are taken into account: + // - deploymentconfigs + ClusterVersionCapabilityDeploymentConfig ClusterVersionCapability = "DeploymentConfig" ) // KnownClusterVersionCapabilities includes all known optional, core cluster components. @@ -298,10 +341,14 @@ var KnownClusterVersionCapabilities = []ClusterVersionCapability{ ClusterVersionCapabilityStorage, ClusterVersionCapabilityOpenShiftSamples, ClusterVersionCapabilityCSISnapshot, + ClusterVersionCapabilityNodeTuning, + ClusterVersionCapabilityMachineAPI, + ClusterVersionCapabilityBuild, + ClusterVersionCapabilityDeploymentConfig, } // ClusterVersionCapabilitySet defines sets of cluster version capabilities. -// +kubebuilder:validation:Enum=None;v4.11;v4.12;vCurrent +// +kubebuilder:validation:Enum=None;v4.11;v4.12;v4.13;v4.14;vCurrent type ClusterVersionCapabilitySet string const ( @@ -321,6 +368,18 @@ const ( // version of OpenShift is installed. ClusterVersionCapabilitySet4_12 ClusterVersionCapabilitySet = "v4.12" + // ClusterVersionCapabilitySet4_13 is the recommended set of + // optional capabilities to enable for the 4.13 version of + // OpenShift. This list will remain the same no matter which + // version of OpenShift is installed. + ClusterVersionCapabilitySet4_13 ClusterVersionCapabilitySet = "v4.13" + + // ClusterVersionCapabilitySet4_14 is the recommended set of + // optional capabilities to enable for the 4.14 version of + // OpenShift. This list will remain the same no matter which + // version of OpenShift is installed. + ClusterVersionCapabilitySet4_14 ClusterVersionCapabilitySet = "v4.14" + // ClusterVersionCapabilitySetCurrent is the recommended set // of optional capabilities to enable for the cluster's // current version of OpenShift. 
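As a side note on consuming the capability sets defined above: ClusterVersionCapabilitySets (extended in the next hunk) is an exported map, so membership checks need no special API. A minimal sketch against the vendored package; the hasCapability helper is illustrative and not part of openshift/api:

    package main

    import (
        "fmt"

        configv1 "github.com/openshift/api/config/v1"
    )

    // hasCapability reports whether the named capability set includes c.
    // Illustrative helper, not part of the vendored openshift/api package.
    func hasCapability(set configv1.ClusterVersionCapabilitySet, c configv1.ClusterVersionCapability) bool {
        for _, known := range configv1.ClusterVersionCapabilitySets[set] {
            if known == c {
                return true
            }
        }
        return false
    }

    func main() {
        // NodeTuning joins the recommended set starting with v4.13 (see the map additions below).
        fmt.Println(hasCapability(configv1.ClusterVersionCapabilitySet4_13, configv1.ClusterVersionCapabilityNodeTuning)) // true
        fmt.Println(hasCapability(configv1.ClusterVersionCapabilitySet4_12, configv1.ClusterVersionCapabilityNodeTuning)) // false
    }
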
@@ -344,6 +403,29 @@ var ClusterVersionCapabilitySets = map[ClusterVersionCapabilitySet][]ClusterVers ClusterVersionCapabilityOpenShiftSamples, ClusterVersionCapabilityCSISnapshot, }, + ClusterVersionCapabilitySet4_13: { + ClusterVersionCapabilityBaremetal, + ClusterVersionCapabilityConsole, + ClusterVersionCapabilityInsights, + ClusterVersionCapabilityMarketplace, + ClusterVersionCapabilityStorage, + ClusterVersionCapabilityOpenShiftSamples, + ClusterVersionCapabilityCSISnapshot, + ClusterVersionCapabilityNodeTuning, + }, + ClusterVersionCapabilitySet4_14: { + ClusterVersionCapabilityBaremetal, + ClusterVersionCapabilityConsole, + ClusterVersionCapabilityInsights, + ClusterVersionCapabilityMarketplace, + ClusterVersionCapabilityStorage, + ClusterVersionCapabilityOpenShiftSamples, + ClusterVersionCapabilityCSISnapshot, + ClusterVersionCapabilityNodeTuning, + ClusterVersionCapabilityMachineAPI, + ClusterVersionCapabilityBuild, + ClusterVersionCapabilityDeploymentConfig, + }, ClusterVersionCapabilitySetCurrent: { ClusterVersionCapabilityBaremetal, ClusterVersionCapabilityConsole, @@ -352,6 +434,10 @@ var ClusterVersionCapabilitySets = map[ClusterVersionCapabilitySet][]ClusterVers ClusterVersionCapabilityStorage, ClusterVersionCapabilityOpenShiftSamples, ClusterVersionCapabilityCSISnapshot, + ClusterVersionCapabilityNodeTuning, + ClusterVersionCapabilityMachineAPI, + ClusterVersionCapabilityBuild, + ClusterVersionCapabilityDeploymentConfig, }, } @@ -614,6 +700,9 @@ type PromQLClusterCondition struct { // +openshift:compatibility-gen:level=1 type ClusterVersionList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` Items []ClusterVersion `json:"items"` diff --git a/vendor/github.com/openshift/api/config/v1/types_console.go b/vendor/github.com/openshift/api/config/v1/types_console.go index e1a128827..928181849 100644 --- a/vendor/github.com/openshift/api/config/v1/types_console.go +++ b/vendor/github.com/openshift/api/config/v1/types_console.go @@ -15,7 +15,10 @@ import ( // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type Console struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration @@ -46,6 +49,9 @@ type ConsoleStatus struct { // +openshift:compatibility-gen:level=1 type ConsoleList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` Items []Console `json:"items"` diff --git a/vendor/github.com/openshift/api/config/v1/types_dns.go b/vendor/github.com/openshift/api/config/v1/types_dns.go index c223f828e..5f8697673 100644 --- a/vendor/github.com/openshift/api/config/v1/types_dns.go +++ b/vendor/github.com/openshift/api/config/v1/types_dns.go @@ -11,7 +11,10 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
// +openshift:compatibility-gen:level=1 type DNS struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration @@ -50,6 +53,12 @@ type DNSSpec struct { // // +optional PrivateZone *DNSZone `json:"privateZone,omitempty"` + // platform holds configuration specific to the underlying + // infrastructure provider for DNS. + // When omitted, this means the user has no opinion and the platform is left + // to choose reasonable defaults. These defaults are subject to change over time. + // +optional + Platform DNSPlatformSpec `json:"platform,omitempty"` } // DNSZone is used to define a DNS hosted zone. @@ -86,7 +95,41 @@ type DNSStatus struct { // +openshift:compatibility-gen:level=1 type DNSList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` Items []DNS `json:"items"` } + +// DNSPlatformSpec holds cloud-provider-specific configuration +// for DNS administration. +// +union +// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'AWS' ? has(self.aws) : !has(self.aws)",message="aws configuration is required when platform is AWS, and forbidden otherwise" +type DNSPlatformSpec struct { + // type is the underlying infrastructure provider for the cluster. + // Allowed values: "", "AWS". + // + // Individual components may not support all platforms, + // and must handle unrecognized platforms with best-effort defaults. + // + // +unionDiscriminator + // +kubebuilder:validation:Required + // +kubebuilder:validation:XValidation:rule="self in ['','AWS']",message="allowed values are '' and 'AWS'" + Type PlatformType `json:"type"` + + // aws contains DNS configuration specific to the Amazon Web Services cloud provider. + // +optional + AWS *AWSDNSSpec `json:"aws"` +} + +// AWSDNSSpec contains DNS configuration specific to the Amazon Web Services cloud provider. +type AWSDNSSpec struct { + // privateZoneIAMRole contains the ARN of an IAM role that should be assumed when performing + // operations on the cluster's private hosted zone specified in the cluster DNS config. + // When left empty, no role should be assumed. + // +kubebuilder:validation:Pattern:=`^arn:(aws|aws-cn|aws-us-gov):iam::[0-9]{12}:role\/.*$` + // +optional + PrivateZoneIAMRole string `json:"privateZoneIAMRole"` +} diff --git a/vendor/github.com/openshift/api/config/v1/types_feature.go b/vendor/github.com/openshift/api/config/v1/types_feature.go index 847a73323..88835ae53 100644 --- a/vendor/github.com/openshift/api/config/v1/types_feature.go +++ b/vendor/github.com/openshift/api/config/v1/types_feature.go @@ -1,6 +1,10 @@ package v1 -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) // +genclient // +genclient:nonNamespaced @@ -11,7 +15,10 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
// +openshift:compatibility-gen:level=1 type FeatureGate struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration @@ -65,13 +72,57 @@ type FeatureGateSelection struct { type CustomFeatureGates struct { // enabled is a list of all feature gates that you want to force on // +optional - Enabled []string `json:"enabled,omitempty"` + Enabled []FeatureGateName `json:"enabled,omitempty"` // disabled is a list of all feature gates that you want to force off // +optional - Disabled []string `json:"disabled,omitempty"` + Disabled []FeatureGateName `json:"disabled,omitempty"` } +// FeatureGateName is a string to enforce patterns on the name of a FeatureGate +// +kubebuilder:validation:Pattern=`^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$` +type FeatureGateName string + type FeatureGateStatus struct { + // conditions represent the observations of the current state. + // Known .status.conditions.type are: "DeterminationDegraded" + // +listType=map + // +listMapKey=type + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // featureGates contains a list of enabled and disabled featureGates that are keyed by payloadVersion. + // Operators other than the CVO and cluster-config-operator, must read the .status.featureGates, locate + // the version they are managing, find the enabled/disabled featuregates and make the operand and operator match. + // The enabled/disabled values for a particular version may change during the life of the cluster as various + // .spec.featureSet values are selected. + // Operators may choose to restart their processes to pick up these changes, but remembering past enable/disable + // lists is beyond the scope of this API and is the responsibility of individual operators. + // Only featureGates with .version in the ClusterVersion.status will be present in this list. + // +listType=map + // +listMapKey=version + FeatureGates []FeatureGateDetails `json:"featureGates"` +} + +type FeatureGateDetails struct { + // version matches the version provided by the ClusterVersion and in the ClusterOperator.Status.Versions field. + // +kubebuilder:validation:Required + // +required + Version string `json:"version"` + // enabled is a list of all feature gates that are enabled in the cluster for the named version. + // +optional + Enabled []FeatureGateAttributes `json:"enabled"` + // disabled is a list of all feature gates that are disabled in the cluster for the named version. + // +optional + Disabled []FeatureGateAttributes `json:"disabled"` +} + +type FeatureGateAttributes struct { + // name is the name of the FeatureGate. + // +kubebuilder:validation:Required + Name FeatureGateName `json:"name"` + + // possible (probable?) future additions include + // 1. support level (Stable, ServiceDeliveryOnly, TechPreview, DevPreview) + // 2. description } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -80,14 +131,17 @@ type FeatureGateStatus struct { // +openshift:compatibility-gen:level=1 type FeatureGateList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` Items []FeatureGate `json:"items"` } type FeatureGateEnabledDisabled struct { - Enabled []string - Disabled []string + Enabled []FeatureGateDescription + Disabled []FeatureGateDescription } // FeatureSets Contains a map of Feature names to Enabled/Disabled Feature. @@ -105,78 +159,101 @@ type FeatureGateEnabledDisabled struct { var FeatureSets = map[FeatureSet]*FeatureGateEnabledDisabled{ Default: defaultFeatures, CustomNoUpgrade: { - Enabled: []string{}, - Disabled: []string{}, + Enabled: []FeatureGateDescription{}, + Disabled: []FeatureGateDescription{}, }, TechPreviewNoUpgrade: newDefaultFeatures(). - with("ExternalCloudProvider"). // sig-cloud-provider, jspeed, OCP specific - with("CSIDriverSharedResource"). // sig-build, adkaplan, OCP specific - with("BuildCSIVolumes"). // sig-build, adkaplan, OCP specific - with("NodeSwap"). // sig-node, ehashman, Kubernetes feature gate - with("MachineAPIProviderOpenStack"). // openstack, egarcia (#forum-openstack), OCP specific - with("CGroupsV2"). // sig-node, harche, OCP specific - with("Crun"). // sig-node, haircommander, OCP specific - with("InsightsConfigAPI"). // insights, tremes (#ccx), OCP specific - with("CSIInlineVolumeAdmission"). // sig-storage, jdobson, OCP specific - with("MatchLabelKeysInPodTopologySpread"). // sig-scheduling, ingvagabund (#forum-workloads), Kubernetes feature gate - with("OpenShiftPodSecurityAdmission"). // bz-auth, standa, OCP specific - toFeatures(), + with(externalCloudProvider). + with(externalCloudProviderGCP). + with(csiDriverSharedResource). + with(buildCSIVolumes). + with(nodeSwap). + with(machineAPIProviderOpenStack). + with(insightsConfigAPI). + with(retroactiveDefaultStorageClass). + with(pdbUnhealthyPodEvictionPolicy). + with(dynamicResourceAllocation). + with(admissionWebhookMatchConditions). + with(azureWorkloadIdentity). + with(gateGatewayAPI). + with(maxUnavailableStatefulSet). + without(eventedPleg). + with(sigstoreImageVerification). + with(gcpLabelsTags). + with(vSphereStaticIPs). + with(routeExternalCertificate). + with(automatedEtcdBackup). + without(machineAPIOperatorDisableMachineHealthCheckController). + with(adminNetworkPolicy). + toFeatures(defaultFeatures), LatencySensitive: newDefaultFeatures(). - with( - "TopologyManager", // sig-pod, sjenning - ). - toFeatures(), + toFeatures(defaultFeatures), } var defaultFeatures = &FeatureGateEnabledDisabled{ - Enabled: []string{ - "APIPriorityAndFairness", // sig-apimachinery, deads2k - "RotateKubeletServerCertificate", // sig-pod, sjenning - "DownwardAPIHugePages", // sig-node, rphillips + Enabled: []FeatureGateDescription{ + openShiftPodSecurityAdmission, + alibabaPlatform, // This is a bug, it should be TechPreviewNoUpgrade. This must be downgraded before 4.14 is shipped. + cloudDualStackNodeIPs, + externalCloudProviderAzure, + externalCloudProviderExternal, + privateHostedZoneAWS, + }, + Disabled: []FeatureGateDescription{ + retroactiveDefaultStorageClass, }, - Disabled: []string{}, } type featureSetBuilder struct { - forceOn []string - forceOff []string + forceOn []FeatureGateDescription + forceOff []FeatureGateDescription } func newDefaultFeatures() *featureSetBuilder { return &featureSetBuilder{} } -func (f *featureSetBuilder) with(forceOn ...string) *featureSetBuilder { - f.forceOn = append(f.forceOn, forceOn...) 
+func (f *featureSetBuilder) with(forceOn FeatureGateDescription) *featureSetBuilder { + for _, curr := range f.forceOn { + if curr.FeatureGateAttributes.Name == forceOn.FeatureGateAttributes.Name { + panic(fmt.Errorf("coding error: %q enabled twice", forceOn.FeatureGateAttributes.Name)) + } + } + f.forceOn = append(f.forceOn, forceOn) return f } -func (f *featureSetBuilder) without(forceOff ...string) *featureSetBuilder { - f.forceOff = append(f.forceOff, forceOff...) +func (f *featureSetBuilder) without(forceOff FeatureGateDescription) *featureSetBuilder { + for _, curr := range f.forceOff { + if curr.FeatureGateAttributes.Name == forceOff.FeatureGateAttributes.Name { + panic(fmt.Errorf("coding error: %q disabled twice", forceOff.FeatureGateAttributes.Name)) + } + } + f.forceOff = append(f.forceOff, forceOff) return f } -func (f *featureSetBuilder) isForcedOff(needle string) bool { +func (f *featureSetBuilder) isForcedOff(needle FeatureGateDescription) bool { for _, forcedOff := range f.forceOff { - if needle == forcedOff { + if needle.FeatureGateAttributes.Name == forcedOff.FeatureGateAttributes.Name { return true } } return false } -func (f *featureSetBuilder) isForcedOn(needle string) bool { +func (f *featureSetBuilder) isForcedOn(needle FeatureGateDescription) bool { for _, forceOn := range f.forceOn { - if needle == forceOn { + if needle.FeatureGateAttributes.Name == forceOn.FeatureGateAttributes.Name { return true } } return false } -func (f *featureSetBuilder) toFeatures() *FeatureGateEnabledDisabled { - finalOn := []string{} - finalOff := []string{} +func (f *featureSetBuilder) toFeatures(defaultFeatures *FeatureGateEnabledDisabled) *FeatureGateEnabledDisabled { + finalOn := []FeatureGateDescription{} + finalOff := []FeatureGateDescription{} // only add the default enabled features if they haven't been explicitly set off for _, defaultOn := range defaultFeatures.Enabled { @@ -188,6 +265,16 @@ func (f *featureSetBuilder) toFeatures() *FeatureGateEnabledDisabled { if f.isForcedOff(currOn) { panic("coding error, you can't have features both on and off") } + found := false + for _, alreadyOn := range finalOn { + if alreadyOn.FeatureGateAttributes.Name == currOn.FeatureGateAttributes.Name { + found = true + } + } + if found { + continue + } + finalOn = append(finalOn, currOn) } diff --git a/vendor/github.com/openshift/api/config/v1/types_image.go b/vendor/github.com/openshift/api/config/v1/types_image.go index 08a31072d..eb7643f2b 100644 --- a/vendor/github.com/openshift/api/config/v1/types_image.go +++ b/vendor/github.com/openshift/api/config/v1/types_image.go @@ -16,7 +16,10 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type Image struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration @@ -84,6 +87,9 @@ type ImageStatus struct { // +openshift:compatibility-gen:level=1 type ImageList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
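Note the behavioral tightening in the builder above: with() and without() now take a single FeatureGateDescription and panic on duplicates, and toFeatures() takes the defaults explicitly, skipping gates that were forced off and deduplicating the merged result. A sketch of the intended call pattern from within the same package; the gate variables here are placeholders, not gates defined by this patch:

// Hypothetical gate descriptions for illustration only.
var (
	gateAlpha = FeatureGateDescription{FeatureGateAttributes: FeatureGateAttributes{Name: "AlphaGate"}}
	gateBeta  = FeatureGateDescription{FeatureGateAttributes: FeatureGateAttributes{Name: "BetaGate"}}
)

// Assembling a feature set: with() forces a gate on, without() forces it off,
// and each panics if the same gate is passed twice. toFeatures() then merges
// in defaultFeatures, skipping anything forced off, dropping repeats, and
// panicking if a gate ends up both forced on and forced off.
var exampleSet = newDefaultFeatures().
	with(gateAlpha).
	without(gateBeta).
	toFeatures(defaultFeatures)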
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` Items []Image `json:"items"` diff --git a/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go b/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go index 8ccad9c53..3dc315f68 100644 --- a/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go +++ b/vendor/github.com/openshift/api/config/v1/types_image_content_policy.go @@ -12,7 +12,10 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type ImageContentPolicy struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration @@ -55,6 +58,9 @@ type ImageContentPolicySpec struct { // +openshift:compatibility-gen:level=1 type ImageContentPolicyList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` Items []ImageContentPolicy `json:"items"` diff --git a/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go b/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go index b84f65e11..987c6cfdc 100644 --- a/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go +++ b/vendor/github.com/openshift/api/config/v1/types_image_digest_mirror_set.go @@ -12,7 +12,10 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type ImageDigestMirrorSet struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration @@ -66,6 +69,9 @@ type ImageDigestMirrorSetStatus struct{} // +openshift:compatibility-gen:level=1 type ImageDigestMirrorSetList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` Items []ImageDigestMirrorSet `json:"items"` diff --git a/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go b/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go index 9d643e708..295522e59 100644 --- a/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go +++ b/vendor/github.com/openshift/api/config/v1/types_image_tag_mirror_set.go @@ -12,7 +12,10 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
 // +openshift:compatibility-gen:level=1
 type ImageTagMirrorSet struct {
-	metav1.TypeMeta `json:",inline"`
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
 	metav1.ObjectMeta `json:"metadata,omitempty"`
 
 	// spec holds user settable values for configuration
@@ -66,6 +69,9 @@ type ImageTagMirrorSetStatus struct{}
 // +openshift:compatibility-gen:level=1
 type ImageTagMirrorSetList struct {
 	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
 	metav1.ListMeta `json:"metadata"`
 
 	Items []ImageTagMirrorSet `json:"items"`
diff --git a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
index f1f1697a7..6c791ee8c 100644
--- a/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
+++ b/vendor/github.com/openshift/api/config/v1/types_infrastructure.go
@@ -12,7 +12,10 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
 // +openshift:compatibility-gen:level=1
 type Infrastructure struct {
-	metav1.TypeMeta `json:",inline"`
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
 	metav1.ObjectMeta `json:"metadata,omitempty"`
 
 	// spec holds user settable values for configuration
@@ -101,6 +104,19 @@ type InfrastructureStatus struct {
 	// +kubebuilder:default=HighlyAvailable
 	// +kubebuilder:validation:Enum=HighlyAvailable;SingleReplica
 	InfrastructureTopology TopologyMode `json:"infrastructureTopology"`
+
+	// cpuPartitioning expresses whether CPU partitioning is a currently enabled feature in the cluster.
+	// CPU Partitioning means that this cluster can support partitioning workloads to specific CPU Sets.
+	// Valid values are "None" and "AllNodes". When omitted, the default value is "None".
+	// The default value of "None" indicates that no nodes will be set up with CPU partitioning.
+	// The "AllNodes" value indicates that all nodes have been set up with CPU partitioning,
+	// and can then be further configured via the PerformanceProfile API.
+	// +kubebuilder:default=None
+	// +default="None"
+	// +kubebuilder:validation:Enum=None;AllNodes
+	// +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade
+	// +optional
+	CPUPartitioning CPUPartitioningMode `json:"cpuPartitioning,omitempty"`
 }
 
 // TopologyMode defines the topology mode of the control/infra nodes.
@@ -123,6 +139,28 @@ const (
 	ExternalTopologyMode TopologyMode = "External"
 )
 
+// CPUPartitioningMode defines the mode for CPU partitioning
+type CPUPartitioningMode string
+
+const (
+	// CPUPartitioningNone means that no CPU Partitioning is on in this cluster infrastructure
+	CPUPartitioningNone CPUPartitioningMode = "None"
+
+	// CPUPartitioningAllNodes means that all nodes are configured with CPU Partitioning in this cluster
+	CPUPartitioningAllNodes CPUPartitioningMode = "AllNodes"
+)
+
+// PlatformLoadBalancerType defines the type of load balancer used by the cluster.
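The new cpuPartitioning field is discovery data: the installer sets it once and workloads or operators read it. A minimal consumer sketch, assuming the vendored configv1 types; the helper is hypothetical:

package example

import configv1 "github.com/openshift/api/config/v1"

// cpuPartitioningEnabled reports whether the cluster was installed with CPU
// partitioning on all nodes. An empty value means the field was never set and
// is equivalent to "None", matching the declared kubebuilder default.
func cpuPartitioningEnabled(infra *configv1.Infrastructure) bool {
	return infra.Status.CPUPartitioning == configv1.CPUPartitioningAllNodes
}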
+type PlatformLoadBalancerType string + +const ( + // LoadBalancerTypeUserManaged is a load balancer with control-plane VIPs managed outside of the cluster by the customer. + LoadBalancerTypeUserManaged PlatformLoadBalancerType = "UserManaged" + + // LoadBalancerTypeOpenShiftManagedDefault is the default load balancer with control-plane VIPs managed by the OpenShift cluster. + LoadBalancerTypeOpenShiftManagedDefault PlatformLoadBalancerType = "OpenShiftManagedDefault" +) + // PlatformType is a specific supported infrastructure provider. // +kubebuilder:validation:Enum="";AWS;Azure;BareMetal;GCP;Libvirt;OpenStack;None;VSphere;oVirt;IBMCloud;KubeVirt;EquinixMetal;PowerVS;AlibabaCloud;Nutanix;External type PlatformType string @@ -192,36 +230,6 @@ const ( IBMCloudProviderTypeUPI IBMCloudProviderType = "UPI" ) -// CloudControllerManagerState defines whether Cloud Controller Manager presence is expected or not -type CloudControllerManagerState string - -const ( - // Cloud Controller Manager is enabled and expected to be installed. - // This value indicates that new nodes should be tainted as uninitialized when created, - // preventing them from running workloads until they are initialized by the cloud controller manager. - CloudControllerManagerExternal CloudControllerManagerState = "External" - - // Cloud Controller Manager is disabled and not expected to be installed. - // This value indicates that new nodes should not be tainted - // and no extra node initialization is expected from the cloud controller manager. - CloudControllerManagerNone CloudControllerManagerState = "None" -) - -// CloudControllerManagerSpec holds Cloud Controller Manager (a.k.a. CCM or CPI) related settings -type CloudControllerManagerSpec struct { - // state determines whether or not an external Cloud Controller Manager is expected to - // be installed within the cluster. - // https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager - // - // When set to "External", new nodes will be tainted as uninitialized when created, - // preventing them from running workloads until they are initialized by the cloud controller manager. - // When omitted or set to "None", new nodes will be not tainted - // and no extra initialization from the cloud controller manager is expected. - // +kubebuilder:validation:Enum="";External;None - // +optional - State CloudControllerManagerState `json:"state"` -} - // ExternalPlatformSpec holds the desired state for the generic External infrastructure provider. type ExternalPlatformSpec struct { // PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. @@ -231,9 +239,6 @@ type ExternalPlatformSpec struct { // +kubebuilder:validation:XValidation:rule="oldSelf == 'Unknown' || self == oldSelf",message="platform name cannot be changed once set" // +optional PlatformName string `json:"platformName,omitempty"` - // CloudControllerManager contains settings specific to the external Cloud Controller Manager (a.k.a. 
CCM or CPI)
-	// +optional
-	CloudControllerManager CloudControllerManagerSpec `json:"cloudControllerManager"`
 }
 
 // PlatformSpec holds the desired state specific to the underlying infrastructure provider
@@ -310,8 +315,48 @@ type PlatformSpec struct {
 	External *ExternalPlatformSpec `json:"external,omitempty"`
 }
 
+// CloudControllerManagerState defines whether Cloud Controller Manager presence is expected or not
+type CloudControllerManagerState string
+
+const (
+	// Cloud Controller Manager is enabled and expected to be installed.
+	// This value indicates that new nodes should be tainted as uninitialized when created,
+	// preventing them from running workloads until they are initialized by the cloud controller manager.
+	CloudControllerManagerExternal CloudControllerManagerState = "External"
+
+	// Cloud Controller Manager is disabled and not expected to be installed.
+	// This value indicates that new nodes should not be tainted
+	// and no extra node initialization is expected from the cloud controller manager.
+	CloudControllerManagerNone CloudControllerManagerState = "None"
+)
+
+// CloudControllerManagerStatus holds the state of Cloud Controller Manager (a.k.a. CCM or CPI) related settings
+// +kubebuilder:validation:XValidation:rule="(has(self.state) == has(oldSelf.state)) || (!has(oldSelf.state) && self.state != \"External\")",message="state may not be added or removed once set"
+type CloudControllerManagerStatus struct {
+	// state determines whether or not an external Cloud Controller Manager is expected to
+	// be installed within the cluster.
+	// https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager
+	//
+	// Valid values are "External", "None" and omitted.
+	// When set to "External", new nodes will be tainted as uninitialized when created,
+	// preventing them from running workloads until they are initialized by the cloud controller manager.
+	// When omitted or set to "None", new nodes will not be tainted
+	// and no extra initialization from the cloud controller manager is expected.
+	// +kubebuilder:validation:Enum="";External;None
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="state is immutable once set"
+	// +optional
+	State CloudControllerManagerState `json:"state"`
+}
+
 // ExternalPlatformStatus holds the current status of the generic External infrastructure provider.
-type ExternalPlatformStatus struct{}
+// +kubebuilder:validation:XValidation:rule="has(self.cloudControllerManager) == has(oldSelf.cloudControllerManager)",message="cloudControllerManager may not be added or removed once set"
+type ExternalPlatformStatus struct {
+	// cloudControllerManager contains settings specific to the external Cloud Controller Manager (a.k.a. CCM or CPI).
+	// When omitted, new nodes will not be tainted
+	// and no extra initialization from the cloud controller manager is expected.
+	// +optional
+	CloudControllerManager CloudControllerManagerStatus `json:"cloudControllerManager"`
+}
 
 // PlatformStatus holds the current status specific to the underlying infrastructure provider
 // of the current cluster. Since these are used at status-level for the underlying cluster, it
@@ -460,6 +505,7 @@ type AWSResourceTag struct {
 type AzurePlatformSpec struct{}
 
 // AzurePlatformStatus holds the current status of the Azure infrastructure provider.
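The two CEL rules on CloudControllerManagerStatus compose as follows: at creation, state may hold any allowed value; after creation, an unset state may become "None" or the empty string but never "External", and once present it can neither change nor disappear. A rough client-side approximation of the same transition check, treating an empty value as unset (hypothetical helper; the API server enforces the real rules):

package example

import (
	"errors"

	configv1 "github.com/openshift/api/config/v1"
)

// validateCCMStateTransition approximates the CEL rules above for a pre-flight
// check: a set state is immutable, and an unset state may not jump straight
// to "External" on an update.
func validateCCMStateTransition(oldState, newState configv1.CloudControllerManagerState) error {
	switch {
	case oldState == newState:
		return nil
	case oldState != "":
		return errors.New("state is immutable once set")
	case newState == configv1.CloudControllerManagerExternal:
		return errors.New("state may not be set to External after creation")
	}
	return nil
}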
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)",message="resourceTags may only be configured during installation"
 type AzurePlatformStatus struct {
 	// resourceGroupName is the Resource Group for new Azure resources created for the cluster.
 	ResourceGroupName string `json:"resourceGroupName"`
@@ -478,6 +524,34 @@ type AzurePlatformStatus struct {
 	// armEndpoint specifies a URL to use for resource management in non-sovereign clouds such as Azure Stack.
 	// +optional
 	ARMEndpoint string `json:"armEndpoint,omitempty"`
+
+	// resourceTags is a list of additional tags to apply to Azure resources created for the cluster.
+	// See https://docs.microsoft.com/en-us/rest/api/resources/tags for information on tagging Azure resources.
+	// Due to limitations on Automation, Content Delivery Network, DNS Azure resources, a maximum of 15 tags
+	// may be applied. OpenShift reserves 5 tags for internal use, allowing 10 tags for user configuration.
+	// +kubebuilder:validation:MaxItems=10
+	// +kubebuilder:validation:XValidation:rule="self.all(x, x in oldSelf) && oldSelf.all(x, x in self)",message="resourceTags are immutable and may only be configured during installation"
+	// +optional
+	ResourceTags []AzureResourceTag `json:"resourceTags,omitempty"`
+}
+
+// AzureResourceTag is a tag to apply to Azure resources created for the cluster.
+type AzureResourceTag struct {
+	// key is the key part of the tag. A tag key can have a maximum of 128 characters and cannot be empty. Key
+	// must begin with a letter, end with a letter, number or underscore, and must contain only alphanumeric
+	// characters and the following special characters `_ . -`.
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=128
+	// +kubebuilder:validation:Pattern=`^[a-zA-Z]([0-9A-Za-z_.-]*[0-9A-Za-z_])?$`
+	Key string `json:"key"`
+	// value is the value part of the tag. A tag value can have a maximum of 256 characters and cannot be empty. Value
+	// must contain only alphanumeric characters and the following special characters `_ + , - . / : ; < = > ? @`.
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=256
+	// +kubebuilder:validation:Pattern=`^[0-9A-Za-z_.=+-@]+$`
+	Value string `json:"value"`
 }
 
 // AzureCloudEnvironment is the name of the Azure cloud environment
@@ -506,12 +580,114 @@ const (
 type GCPPlatformSpec struct{}
 
 // GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider.
+// +openshift:validation:FeatureSetAwareXValidation:featureSet=CustomNoUpgrade;TechPreviewNoUpgrade,rule="!has(oldSelf.resourceLabels) && !has(self.resourceLabels) || has(oldSelf.resourceLabels) && has(self.resourceLabels)",message="resourceLabels may only be configured during installation"
+// +openshift:validation:FeatureSetAwareXValidation:featureSet=CustomNoUpgrade;TechPreviewNoUpgrade,rule="!has(oldSelf.resourceTags) && !has(self.resourceTags) || has(oldSelf.resourceTags) && has(self.resourceTags)",message="resourceTags may only be configured during installation"
 type GCPPlatformStatus struct {
 	// projectID is the Project ID for new GCP resources created for the cluster.
 	ProjectID string `json:"projectID"`
 
 	// region holds the region for new GCP resources created for the cluster.
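The Azure tag key and value patterns above are enforced server-side by kubebuilder validation; clients can pre-validate candidate tags with the same expressions. A small sketch; the variable and function names are illustrative, while the regular expressions and length limits are copied verbatim from the markers above:

package example

import "regexp"

var (
	// Patterns copied from the kubebuilder markers on AzureResourceTag.
	azureTagKeyRE   = regexp.MustCompile(`^[a-zA-Z]([0-9A-Za-z_.-]*[0-9A-Za-z_])?$`)
	azureTagValueRE = regexp.MustCompile(`^[0-9A-Za-z_.=+-@]+$`)
)

// validAzureTag reports whether a key/value pair would pass the API server's
// pattern and length checks (MaxLength 128 for keys, 256 for values).
func validAzureTag(key, value string) bool {
	return len(key) <= 128 && azureTagKeyRE.MatchString(key) &&
		len(value) <= 256 && azureTagValueRE.MatchString(value)
}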
Region string `json:"region"` + + // resourceLabels is a list of additional labels to apply to GCP resources created for the cluster. + // See https://cloud.google.com/compute/docs/labeling-resources for information on labeling GCP resources. + // GCP supports a maximum of 64 labels per resource. OpenShift reserves 32 labels for internal use, + // allowing 32 labels for user configuration. + // +kubebuilder:validation:MaxItems=32 + // +kubebuilder:validation:XValidation:rule="self.all(x, x in oldSelf) && oldSelf.all(x, x in self)",message="resourceLabels are immutable and may only be configured during installation" + // +listType=map + // +listMapKey=key + // +optional + // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + ResourceLabels []GCPResourceLabel `json:"resourceLabels,omitempty"` + + // resourceTags is a list of additional tags to apply to GCP resources created for the cluster. + // See https://cloud.google.com/resource-manager/docs/tags/tags-overview for information on + // tagging GCP resources. GCP supports a maximum of 50 tags per resource. + // +kubebuilder:validation:MaxItems=50 + // +kubebuilder:validation:XValidation:rule="self.all(x, x in oldSelf) && oldSelf.all(x, x in self)",message="resourceTags are immutable and may only be configured during installation" + // +listType=map + // +listMapKey=key + // +optional + // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + ResourceTags []GCPResourceTag `json:"resourceTags,omitempty"` +} + +// GCPResourceLabel is a label to apply to GCP resources created for the cluster. +type GCPResourceLabel struct { + // key is the key part of the label. A label key can have a maximum of 63 characters and cannot be empty. + // Label key must begin with a lowercase letter, and must contain only lowercase letters, numeric characters, + // and the following special characters `_-`. Label key must not have the reserved prefixes `kubernetes-io` + // and `openshift-io`. + // +kubebuilder:validation:XValidation:rule="!self.startsWith('openshift-io') && !self.startsWith('kubernetes-io')",message="label keys must not start with either `openshift-io` or `kubernetes-io`" + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +kubebuilder:validation:Pattern=`^[a-z][0-9a-z_-]+$` + Key string `json:"key"` + + // value is the value part of the label. A label value can have a maximum of 63 characters and cannot be empty. + // Value must contain only lowercase letters, numeric characters, and the following special characters `_-`. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +kubebuilder:validation:Pattern=`^[0-9a-z_-]+$` + Value string `json:"value"` +} + +// GCPResourceTag is a tag to apply to GCP resources created for the cluster. +type GCPResourceTag struct { + // parentID is the ID of the hierarchical resource where the tags are defined, + // e.g. at the Organization or the Project level. To find the Organization or Project ID refer to the following pages: + // https://cloud.google.com/resource-manager/docs/creating-managing-organization#retrieving_your_organization_id, + // https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects. + // An OrganizationID must consist of decimal numbers, and cannot have leading zeroes. 
+ // A ProjectID must be 6 to 30 characters in length, can only contain lowercase letters, numbers, + // and hyphens, and must start with a letter, and cannot end with a hyphen. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=32 + // +kubebuilder:validation:Pattern=`(^[1-9][0-9]{0,31}$)|(^[a-z][a-z0-9-]{4,28}[a-z0-9]$)` + ParentID string `json:"parentID"` + + // key is the key part of the tag. A tag key can have a maximum of 63 characters and cannot be empty. + // Tag key must begin and end with an alphanumeric character, and must contain only uppercase, lowercase + // alphanumeric characters, and the following special characters `._-`. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([0-9A-Za-z_.-]{0,61}[a-zA-Z0-9])?$` + Key string `json:"key"` + + // value is the value part of the tag. A tag value can have a maximum of 63 characters and cannot be empty. + // Tag value must begin and end with an alphanumeric character, and must contain only uppercase, lowercase + // alphanumeric characters, and the following special characters `_-.@%=+:,*#&(){}[]` and spaces. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9]([0-9A-Za-z_.@%=+:,*#&()\[\]{}\-\s]{0,61}[a-zA-Z0-9])?$` + Value string `json:"value"` +} + +// BareMetalPlatformLoadBalancer defines the load balancer used by the cluster on BareMetal platform. +// +union +type BareMetalPlatformLoadBalancer struct { + // type defines the type of load balancer used by the cluster on BareMetal platform + // which can be a user-managed or openshift-managed load balancer + // that is to be used for the OpenShift API and Ingress endpoints. + // When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing + // defined in the machine config operator will be deployed. + // When set to UserManaged these static pods will not be deployed and it is expected that + // the load balancer is configured out of band by the deployer. + // When omitted, this means no opinion and the platform is left to choose a reasonable default. + // The default value is OpenShiftManagedDefault. + // +default="OpenShiftManagedDefault" + // +kubebuilder:default:="OpenShiftManagedDefault" + // +kubebuilder:validation:Enum:="OpenShiftManagedDefault";"UserManaged" + // +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="type is immutable once set" + // +optional + // +unionDiscriminator + Type PlatformLoadBalancerType `json:"type,omitempty"` } // BareMetalPlatformSpec holds the desired state of the BareMetal infrastructure provider. @@ -562,6 +738,34 @@ type BareMetalPlatformStatus struct { // datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames // to the nodes in the cluster. NodeDNSIP string `json:"nodeDNSIP,omitempty"` + + // loadBalancer defines how the load balancer used by the cluster is configured. 
+	// +default={"type": "OpenShiftManagedDefault"}
+	// +kubebuilder:default={"type": "OpenShiftManagedDefault"}
+	// +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade
+	// +optional
+	LoadBalancer *BareMetalPlatformLoadBalancer `json:"loadBalancer,omitempty"`
+}
+
+// OpenStackPlatformLoadBalancer defines the load balancer used by the cluster on OpenStack platform.
+// +union
+type OpenStackPlatformLoadBalancer struct {
+	// type defines the type of load balancer used by the cluster on OpenStack platform
+	// which can be a user-managed or openshift-managed load balancer
+	// that is to be used for the OpenShift API and Ingress endpoints.
+	// When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing
+	// defined in the machine config operator will be deployed.
+	// When set to UserManaged these static pods will not be deployed and it is expected that
+	// the load balancer is configured out of band by the deployer.
+	// When omitted, this means no opinion and the platform is left to choose a reasonable default.
+	// The default value is OpenShiftManagedDefault.
+	// +default="OpenShiftManagedDefault"
+	// +kubebuilder:default:="OpenShiftManagedDefault"
+	// +kubebuilder:validation:Enum:="OpenShiftManagedDefault";"UserManaged"
+	// +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="type is immutable once set"
+	// +optional
+	// +unionDiscriminator
+	Type PlatformLoadBalancerType `json:"type,omitempty"`
 }
 
 // OpenStackPlatformSpec holds the desired state of the OpenStack infrastructure provider.
@@ -614,6 +818,33 @@ type OpenStackPlatformStatus struct {
 	// datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames
 	// to the nodes in the cluster.
 	NodeDNSIP string `json:"nodeDNSIP,omitempty"`
+
+	// loadBalancer defines how the load balancer used by the cluster is configured.
+	// +default={"type": "OpenShiftManagedDefault"}
+	// +kubebuilder:default={"type": "OpenShiftManagedDefault"}
+	// +optional
+	LoadBalancer *OpenStackPlatformLoadBalancer `json:"loadBalancer,omitempty"`
+}
+
+// OvirtPlatformLoadBalancer defines the load balancer used by the cluster on Ovirt platform.
+// +union
+type OvirtPlatformLoadBalancer struct {
+	// type defines the type of load balancer used by the cluster on Ovirt platform
+	// which can be a user-managed or openshift-managed load balancer
+	// that is to be used for the OpenShift API and Ingress endpoints.
+	// When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing
+	// defined in the machine config operator will be deployed.
+	// When set to UserManaged these static pods will not be deployed and it is expected that
+	// the load balancer is configured out of band by the deployer.
+	// When omitted, this means no opinion and the platform is left to choose a reasonable default.
+	// The default value is OpenShiftManagedDefault.
+	// +default="OpenShiftManagedDefault"
+	// +kubebuilder:default:="OpenShiftManagedDefault"
+	// +kubebuilder:validation:Enum:="OpenShiftManagedDefault";"UserManaged"
+	// +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="type is immutable once set"
+	// +optional
+	// +unionDiscriminator
+	Type PlatformLoadBalancerType `json:"type,omitempty"`
 }
 
 // OvirtPlatformSpec holds the desired state of the oVirt infrastructure provider.
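All of these per-platform load balancer unions share one shape: an optional pointer on the platform status whose type discriminator defaults to OpenShiftManagedDefault. Consumers should treat a nil struct or an empty type as that default; a sketch for the OpenStack variant, with a hypothetical helper name:

package example

import configv1 "github.com/openshift/api/config/v1"

// effectiveLoadBalancerType resolves the discriminator the way the
// kubebuilder defaults above do: a nil loadBalancer or an empty type
// means OpenShiftManagedDefault.
func effectiveLoadBalancerType(lb *configv1.OpenStackPlatformLoadBalancer) configv1.PlatformLoadBalancerType {
	if lb == nil || lb.Type == "" {
		return configv1.LoadBalancerTypeOpenShiftManagedDefault
	}
	return lb.Type
}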
@@ -657,6 +888,34 @@ type OvirtPlatformStatus struct {
 	// deprecated: as of 4.6, this field is no longer set or honored. It will be removed in a future release.
 	NodeDNSIP string `json:"nodeDNSIP,omitempty"`
+
+	// loadBalancer defines how the load balancer used by the cluster is configured.
+	// +default={"type": "OpenShiftManagedDefault"}
+	// +kubebuilder:default={"type": "OpenShiftManagedDefault"}
+	// +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade
+	// +optional
+	LoadBalancer *OvirtPlatformLoadBalancer `json:"loadBalancer,omitempty"`
+}
+
+// VSpherePlatformLoadBalancer defines the load balancer used by the cluster on VSphere platform.
+// +union
+type VSpherePlatformLoadBalancer struct {
+	// type defines the type of load balancer used by the cluster on VSphere platform
+	// which can be a user-managed or openshift-managed load balancer
+	// that is to be used for the OpenShift API and Ingress endpoints.
+	// When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing
+	// defined in the machine config operator will be deployed.
+	// When set to UserManaged these static pods will not be deployed and it is expected that
+	// the load balancer is configured out of band by the deployer.
+	// When omitted, this means no opinion and the platform is left to choose a reasonable default.
+	// The default value is OpenShiftManagedDefault.
+	// +default="OpenShiftManagedDefault"
+	// +kubebuilder:default:="OpenShiftManagedDefault"
+	// +kubebuilder:validation:Enum:="OpenShiftManagedDefault";"UserManaged"
+	// +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="type is immutable once set"
+	// +optional
+	// +unionDiscriminator
+	Type PlatformLoadBalancerType `json:"type,omitempty"`
 }
 
 // VSpherePlatformFailureDomainSpec holds the region and zone failure domain and
@@ -834,7 +1093,6 @@ type VSpherePlatformSpec struct {
 	// ---
 	// + If VCenters is not defined use the existing cloud-config configmap defined
 	// + in openshift-config.
-	// +openshift:enable:FeatureSets=TechPreviewNoUpgrade
 	// +kubebuilder:validation:MaxItems=1
 	// +kubebuilder:validation:MinItems=0
 	// +optional
@@ -842,7 +1100,6 @@ type VSpherePlatformSpec struct {
 
 	// failureDomains contains the definition of region, zone and the vCenter topology.
 	// If this is omitted failure domains (regions and zones) will not be used.
-	// +openshift:enable:FeatureSets=TechPreviewNoUpgrade
 	// +optional
 	FailureDomains []VSpherePlatformFailureDomainSpec `json:"failureDomains,omitempty"`
 
@@ -851,7 +1108,6 @@ type VSpherePlatformSpec struct {
 	// If this field is omitted, networking defaults to the legacy
 	// address selection behavior which is to only support a single address and
 	// return the first one found.
-	// +openshift:enable:FeatureSets=TechPreviewNoUpgrade
 	// +optional
 	NodeNetworking VSpherePlatformNodeNetworking `json:"nodeNetworking,omitempty"`
 }
 
@@ -898,6 +1154,13 @@ type VSpherePlatformStatus struct {
 	// datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames
 	// to the nodes in the cluster.
 	NodeDNSIP string `json:"nodeDNSIP,omitempty"`
+
+	// loadBalancer defines how the load balancer used by the cluster is configured.
+	// +default={"type": "OpenShiftManagedDefault"}
+	// +kubebuilder:default={"type": "OpenShiftManagedDefault"}
+	// +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade
+	// +optional
+	LoadBalancer *VSpherePlatformLoadBalancer `json:"loadBalancer,omitempty"`
 }
 
 // IBMCloudPlatformSpec holds the desired state of the IBMCloud infrastructure provider.
@@ -994,6 +1257,7 @@ type PowerVSPlatformSpec struct {
 }
 
 // PowerVSPlatformStatus holds the current status of the IBM Power Systems Virtual Servers infrastructure provider.
+// +kubebuilder:validation:XValidation:rule="!has(oldSelf.resourceGroup) || has(self.resourceGroup)",message="cannot unset resourceGroup once set"
 type PowerVSPlatformStatus struct {
 	// region holds the default Power VS region for new Power VS resources created by the cluster.
 	Region string `json:"region"`
@@ -1002,6 +1266,18 @@ type PowerVSPlatformStatus struct {
 	// Note: Currently only single-zone OCP clusters are supported
 	Zone string `json:"zone"`
 
+	// resourceGroup is the resource group name for new IBMCloud resources created for a cluster.
+	// The resource group specified here will be used by cluster-image-registry-operator to set up a COS Instance in IBMCloud for the cluster registry.
+	// More about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs.
+	// When omitted, the image registry operator won't be able to configure storage,
+	// which results in the image registry cluster operator not being in an available state.
+	//
+	// +kubebuilder:validation:Pattern=^[a-zA-Z0-9-_ ]+$
+	// +kubebuilder:validation:MaxLength=40
+	// +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="resourceGroup is immutable once set"
+	// +optional
+	ResourceGroup string `json:"resourceGroup"`
+
 	// serviceEndpoints is a list of custom endpoints which will override the default
 	// service endpoints of a Power VS service.
 	// +optional
@@ -1055,6 +1331,27 @@ type AlibabaCloudResourceTag struct {
 	Value string `json:"value"`
 }
 
+// NutanixPlatformLoadBalancer defines the load balancer used by the cluster on Nutanix platform.
+// +union
+type NutanixPlatformLoadBalancer struct {
+	// type defines the type of load balancer used by the cluster on Nutanix platform
+	// which can be a user-managed or openshift-managed load balancer
+	// that is to be used for the OpenShift API and Ingress endpoints.
+	// When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing
+	// defined in the machine config operator will be deployed.
+	// When set to UserManaged these static pods will not be deployed and it is expected that
+	// the load balancer is configured out of band by the deployer.
+	// When omitted, this means no opinion and the platform is left to choose a reasonable default.
+	// The default value is OpenShiftManagedDefault.
+	// +default="OpenShiftManagedDefault"
+	// +kubebuilder:default:="OpenShiftManagedDefault"
+	// +kubebuilder:validation:Enum:="OpenShiftManagedDefault";"UserManaged"
+	// +kubebuilder:validation:XValidation:rule="oldSelf == '' || self == oldSelf",message="type is immutable once set"
+	// +optional
+	// +unionDiscriminator
+	Type PlatformLoadBalancerType `json:"type,omitempty"`
+}
+
 // NutanixPlatformSpec holds the desired state of the Nutanix infrastructure provider.
 // This only includes fields that can be modified in the cluster.
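Read together, the two resourceGroup rules form a write-once ratchet: the field may stay unset, be set exactly once, and afterwards can be neither changed nor cleared. A client-side sketch of the same check (hypothetical helper; the server enforces this regardless):

package example

import "errors"

// validateResourceGroupTransition mirrors the PowerVS CEL rules:
// "!has(oldSelf.resourceGroup) || has(self.resourceGroup)" forbids unsetting,
// and "oldSelf == '' || self == oldSelf" forbids changing a non-empty value.
func validateResourceGroupTransition(oldRG, newRG string) error {
	switch {
	case oldRG == "":
		return nil // first write (or still unset) is always allowed
	case newRG == "":
		return errors.New("cannot unset resourceGroup once set")
	case newRG != oldRG:
		return errors.New("resourceGroup is immutable once set")
	}
	return nil
}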
type NutanixPlatformSpec struct { @@ -1140,6 +1437,13 @@ type NutanixPlatformStatus struct { // +kubebuilder:validation:Format=ip // +kubebuilder:validation:MaxItems=2 IngressIPs []string `json:"ingressIPs"` + + // loadBalancer defines how the load balancer used by the cluster is configured. + // +default={"type": "OpenShiftManagedDefault"} + // +kubebuilder:default={"type": "OpenShiftManagedDefault"} + // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + // +optional + LoadBalancer *NutanixPlatformLoadBalancer `json:"loadBalancer,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -1150,6 +1454,9 @@ type NutanixPlatformStatus struct { // +openshift:compatibility-gen:level=1 type InfrastructureList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` Items []Infrastructure `json:"items"` diff --git a/vendor/github.com/openshift/api/config/v1/types_ingress.go b/vendor/github.com/openshift/api/config/v1/types_ingress.go index 1dec6b1d3..e518f6765 100644 --- a/vendor/github.com/openshift/api/config/v1/types_ingress.go +++ b/vendor/github.com/openshift/api/config/v1/types_ingress.go @@ -14,7 +14,10 @@ import ( // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type Ingress struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration @@ -311,6 +314,9 @@ type ComponentRouteStatus struct { // +openshift:compatibility-gen:level=1 type IngressList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` Items []Ingress `json:"items"` diff --git a/vendor/github.com/openshift/api/config/v1/types_network.go b/vendor/github.com/openshift/api/config/v1/types_network.go index 59392a96d..c79bc8cf0 100644 --- a/vendor/github.com/openshift/api/config/v1/types_network.go +++ b/vendor/github.com/openshift/api/config/v1/types_network.go @@ -12,7 +12,10 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type Network struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration. @@ -135,6 +138,9 @@ type ExternalIPPolicy struct { // +openshift:compatibility-gen:level=1 type NetworkList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` Items []Network `json:"items"` diff --git a/vendor/github.com/openshift/api/config/v1/types_node.go b/vendor/github.com/openshift/api/config/v1/types_node.go index 10a805902..233c89d9c 100644 --- a/vendor/github.com/openshift/api/config/v1/types_node.go +++ b/vendor/github.com/openshift/api/config/v1/types_node.go @@ -17,7 +17,10 @@ import ( // +kubebuilder:resource:path=nodes,scope=Cluster // +kubebuilder:subresource:status type Node struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration @@ -102,6 +105,9 @@ const ( // +openshift:compatibility-gen:level=1 type NodeList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` Items []Node `json:"items"` diff --git a/vendor/github.com/openshift/api/config/v1/types_oauth.go b/vendor/github.com/openshift/api/config/v1/types_oauth.go index 02fbbf9d4..451a5ec38 100644 --- a/vendor/github.com/openshift/api/config/v1/types_oauth.go +++ b/vendor/github.com/openshift/api/config/v1/types_oauth.go @@ -15,7 +15,10 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type OAuth struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` // spec holds user settable values for configuration // +kubebuilder:validation:Required @@ -533,11 +536,12 @@ type OpenIDIdentityProvider struct { // UserIDClaim is the claim used to provide a stable identifier for OIDC identities. // Per http://openid.net/specs/openid-connect-core-1_0.html#ClaimStability -// "The sub (subject) and iss (issuer) Claims, used together, are the only Claims that an RP can -// rely upon as a stable identifier for the End-User, since the sub Claim MUST be locally unique -// and never reassigned within the Issuer for a particular End-User, as described in Section 2. -// Therefore, the only guaranteed unique identifier for a given End-User is the combination of the -// iss Claim and the sub Claim." +// +// "The sub (subject) and iss (issuer) Claims, used together, are the only Claims that an RP can +// rely upon as a stable identifier for the End-User, since the sub Claim MUST be locally unique +// and never reassigned within the Issuer for a particular End-User, as described in Section 2. +// Therefore, the only guaranteed unique identifier for a given End-User is the combination of the +// iss Claim and the sub Claim." const UserIDClaim = "sub" // OpenIDClaim represents a claim retrieved from an OpenID provider's tokens or userInfo @@ -579,6 +583,9 @@ type OpenIDClaims struct { // +openshift:compatibility-gen:level=1 type OAuthList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` Items []OAuth `json:"items"` diff --git a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go index 67a029529..ba2c96343 100644 --- a/vendor/github.com/openshift/api/config/v1/types_operatorhub.go +++ b/vendor/github.com/openshift/api/config/v1/types_operatorhub.go @@ -43,7 +43,10 @@ type OperatorHubStatus struct { // +genclient:nonNamespaced // +openshift:compatibility-gen:level=1 type OperatorHub struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` Spec OperatorHubSpec `json:"spec"` @@ -58,6 +61,9 @@ type OperatorHub struct { // +openshift:compatibility-gen:level=1 type OperatorHubList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` Items []OperatorHub `json:"items"` } diff --git a/vendor/github.com/openshift/api/config/v1/types_project.go b/vendor/github.com/openshift/api/config/v1/types_project.go index add6abf66..85afb90c2 100644 --- a/vendor/github.com/openshift/api/config/v1/types_project.go +++ b/vendor/github.com/openshift/api/config/v1/types_project.go @@ -11,7 +11,10 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type Project struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration @@ -53,6 +56,9 @@ type ProjectStatus struct { // +openshift:compatibility-gen:level=1 type ProjectList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` Items []Project `json:"items"` diff --git a/vendor/github.com/openshift/api/config/v1/types_proxy.go b/vendor/github.com/openshift/api/config/v1/types_proxy.go index 01ee4690d..40ed296d6 100644 --- a/vendor/github.com/openshift/api/config/v1/types_proxy.go +++ b/vendor/github.com/openshift/api/config/v1/types_proxy.go @@ -13,7 +13,10 @@ import ( // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type Proxy struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // Spec holds user-settable values for the proxy configuration @@ -93,6 +96,9 @@ type ProxyStatus struct { // +openshift:compatibility-gen:level=1 type ProxyList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` Items []Proxy `json:"items"` diff --git a/vendor/github.com/openshift/api/config/v1/types_scheduling.go b/vendor/github.com/openshift/api/config/v1/types_scheduling.go index a69d2a35c..7367f414f 100644 --- a/vendor/github.com/openshift/api/config/v1/types_scheduling.go +++ b/vendor/github.com/openshift/api/config/v1/types_scheduling.go @@ -12,7 +12,10 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type Scheduler struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration @@ -99,6 +102,9 @@ type SchedulerStatus struct { // +openshift:compatibility-gen:level=1 type SchedulerList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` Items []Scheduler `json:"items"` diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go index a9babbc7f..44d7428e6 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.deepcopy.go @@ -179,6 +179,22 @@ func (in *APIServerStatus) DeepCopy() *APIServerStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSDNSSpec) DeepCopyInto(out *AWSDNSSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSDNSSpec. +func (in *AWSDNSSpec) DeepCopy() *AWSDNSSpec { + if in == nil { + return nil + } + out := new(AWSDNSSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AWSIngressSpec) DeepCopyInto(out *AWSIngressSpec) { *out = *in @@ -555,6 +571,11 @@ func (in *AzurePlatformSpec) DeepCopy() *AzurePlatformSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AzurePlatformStatus) DeepCopyInto(out *AzurePlatformStatus) { *out = *in + if in.ResourceTags != nil { + in, out := &in.ResourceTags, &out.ResourceTags + *out = make([]AzureResourceTag, len(*in)) + copy(*out, *in) + } return } @@ -568,6 +589,38 @@ func (in *AzurePlatformStatus) DeepCopy() *AzurePlatformStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *AzureResourceTag) DeepCopyInto(out *AzureResourceTag) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureResourceTag. +func (in *AzureResourceTag) DeepCopy() *AzureResourceTag { + if in == nil { + return nil + } + out := new(AzureResourceTag) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BareMetalPlatformLoadBalancer) DeepCopyInto(out *BareMetalPlatformLoadBalancer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BareMetalPlatformLoadBalancer. +func (in *BareMetalPlatformLoadBalancer) DeepCopy() *BareMetalPlatformLoadBalancer { + if in == nil { + return nil + } + out := new(BareMetalPlatformLoadBalancer) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BareMetalPlatformSpec) DeepCopyInto(out *BareMetalPlatformSpec) { *out = *in @@ -597,6 +650,11 @@ func (in *BareMetalPlatformStatus) DeepCopyInto(out *BareMetalPlatformStatus) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(BareMetalPlatformLoadBalancer) + **out = **in + } return } @@ -818,17 +876,17 @@ func (in *ClientConnectionOverrides) DeepCopy() *ClientConnectionOverrides { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CloudControllerManagerSpec) DeepCopyInto(out *CloudControllerManagerSpec) { +func (in *CloudControllerManagerStatus) DeepCopyInto(out *CloudControllerManagerStatus) { *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudControllerManagerSpec. -func (in *CloudControllerManagerSpec) DeepCopy() *CloudControllerManagerSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudControllerManagerStatus. +func (in *CloudControllerManagerStatus) DeepCopy() *CloudControllerManagerStatus { if in == nil { return nil } - out := new(CloudControllerManagerSpec) + out := new(CloudControllerManagerStatus) in.DeepCopyInto(out) return out } @@ -1455,12 +1513,12 @@ func (in *CustomFeatureGates) DeepCopyInto(out *CustomFeatureGates) { *out = *in if in.Enabled != nil { in, out := &in.Enabled, &out.Enabled - *out = make([]string, len(*in)) + *out = make([]FeatureGateName, len(*in)) copy(*out, *in) } if in.Disabled != nil { in, out := &in.Disabled, &out.Disabled - *out = make([]string, len(*in)) + *out = make([]FeatureGateName, len(*in)) copy(*out, *in) } return @@ -1554,6 +1612,27 @@ func (in *DNSList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSPlatformSpec) DeepCopyInto(out *DNSPlatformSpec) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSDNSSpec) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSPlatformSpec. 
+func (in *DNSPlatformSpec) DeepCopy() *DNSPlatformSpec { + if in == nil { + return nil + } + out := new(DNSPlatformSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DNSSpec) DeepCopyInto(out *DNSSpec) { *out = *in @@ -1567,6 +1646,7 @@ func (in *DNSSpec) DeepCopyInto(out *DNSSpec) { *out = new(DNSZone) (*in).DeepCopyInto(*out) } + in.Platform.DeepCopyInto(&out.Platform) return } @@ -1794,7 +1874,6 @@ func (in *ExternalIPPolicy) DeepCopy() *ExternalIPPolicy { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExternalPlatformSpec) DeepCopyInto(out *ExternalPlatformSpec) { *out = *in - out.CloudControllerManager = in.CloudControllerManager return } @@ -1811,6 +1890,7 @@ func (in *ExternalPlatformSpec) DeepCopy() *ExternalPlatformSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ExternalPlatformStatus) DeepCopyInto(out *ExternalPlatformStatus) { *out = *in + out.CloudControllerManager = in.CloudControllerManager return } @@ -1830,7 +1910,7 @@ func (in *FeatureGate) DeepCopyInto(out *FeatureGate) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status + in.Status.DeepCopyInto(&out.Status) return } @@ -1852,17 +1932,76 @@ func (in *FeatureGate) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGateAttributes) DeepCopyInto(out *FeatureGateAttributes) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateAttributes. +func (in *FeatureGateAttributes) DeepCopy() *FeatureGateAttributes { + if in == nil { + return nil + } + out := new(FeatureGateAttributes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGateDescription) DeepCopyInto(out *FeatureGateDescription) { + *out = *in + out.FeatureGateAttributes = in.FeatureGateAttributes + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateDescription. +func (in *FeatureGateDescription) DeepCopy() *FeatureGateDescription { + if in == nil { + return nil + } + out := new(FeatureGateDescription) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FeatureGateDetails) DeepCopyInto(out *FeatureGateDetails) { + *out = *in + if in.Enabled != nil { + in, out := &in.Enabled, &out.Enabled + *out = make([]FeatureGateAttributes, len(*in)) + copy(*out, *in) + } + if in.Disabled != nil { + in, out := &in.Disabled, &out.Disabled + *out = make([]FeatureGateAttributes, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureGateDetails. +func (in *FeatureGateDetails) DeepCopy() *FeatureGateDetails { + if in == nil { + return nil + } + out := new(FeatureGateDetails) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *FeatureGateEnabledDisabled) DeepCopyInto(out *FeatureGateEnabledDisabled) { *out = *in if in.Enabled != nil { in, out := &in.Enabled, &out.Enabled - *out = make([]string, len(*in)) + *out = make([]FeatureGateDescription, len(*in)) copy(*out, *in) } if in.Disabled != nil { in, out := &in.Disabled, &out.Disabled - *out = make([]string, len(*in)) + *out = make([]FeatureGateDescription, len(*in)) copy(*out, *in) } return @@ -1952,6 +2091,20 @@ func (in *FeatureGateSpec) DeepCopy() *FeatureGateSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *FeatureGateStatus) DeepCopyInto(out *FeatureGateStatus) { *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make([]FeatureGateDetails, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -1984,6 +2137,16 @@ func (in *GCPPlatformSpec) DeepCopy() *GCPPlatformSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GCPPlatformStatus) DeepCopyInto(out *GCPPlatformStatus) { *out = *in + if in.ResourceLabels != nil { + in, out := &in.ResourceLabels, &out.ResourceLabels + *out = make([]GCPResourceLabel, len(*in)) + copy(*out, *in) + } + if in.ResourceTags != nil { + in, out := &in.ResourceTags, &out.ResourceTags + *out = make([]GCPResourceTag, len(*in)) + copy(*out, *in) + } return } @@ -1997,6 +2160,38 @@ func (in *GCPPlatformStatus) DeepCopy() *GCPPlatformStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPResourceLabel) DeepCopyInto(out *GCPResourceLabel) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPResourceLabel. +func (in *GCPResourceLabel) DeepCopy() *GCPResourceLabel { + if in == nil { + return nil + } + out := new(GCPResourceLabel) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPResourceTag) DeepCopyInto(out *GCPResourceTag) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPResourceTag. +func (in *GCPResourceTag) DeepCopy() *GCPResourceTag { + if in == nil { + return nil + } + out := new(GCPResourceTag) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GenericAPIServerConfig) DeepCopyInto(out *GenericAPIServerConfig) { *out = *in @@ -3494,6 +3689,22 @@ func (in *NodeStatus) DeepCopy() *NodeStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixPlatformLoadBalancer) DeepCopyInto(out *NutanixPlatformLoadBalancer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixPlatformLoadBalancer. 
+func (in *NutanixPlatformLoadBalancer) DeepCopy() *NutanixPlatformLoadBalancer { + if in == nil { + return nil + } + out := new(NutanixPlatformLoadBalancer) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NutanixPlatformSpec) DeepCopyInto(out *NutanixPlatformSpec) { *out = *in @@ -3529,6 +3740,11 @@ func (in *NutanixPlatformStatus) DeepCopyInto(out *NutanixPlatformStatus) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(NutanixPlatformLoadBalancer) + **out = **in + } return } @@ -3814,6 +4030,22 @@ func (in *OpenIDIdentityProvider) DeepCopy() *OpenIDIdentityProvider { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackPlatformLoadBalancer) DeepCopyInto(out *OpenStackPlatformLoadBalancer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackPlatformLoadBalancer. +func (in *OpenStackPlatformLoadBalancer) DeepCopy() *OpenStackPlatformLoadBalancer { + if in == nil { + return nil + } + out := new(OpenStackPlatformLoadBalancer) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OpenStackPlatformSpec) DeepCopyInto(out *OpenStackPlatformSpec) { *out = *in @@ -3843,6 +4075,11 @@ func (in *OpenStackPlatformStatus) DeepCopyInto(out *OpenStackPlatformStatus) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(OpenStackPlatformLoadBalancer) + **out = **in + } return } @@ -3975,6 +4212,22 @@ func (in *OperatorHubStatus) DeepCopy() *OperatorHubStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OvirtPlatformLoadBalancer) DeepCopyInto(out *OvirtPlatformLoadBalancer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OvirtPlatformLoadBalancer. +func (in *OvirtPlatformLoadBalancer) DeepCopy() *OvirtPlatformLoadBalancer { + if in == nil { + return nil + } + out := new(OvirtPlatformLoadBalancer) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *OvirtPlatformSpec) DeepCopyInto(out *OvirtPlatformSpec) { *out = *in @@ -4004,6 +4257,11 @@ func (in *OvirtPlatformStatus) DeepCopyInto(out *OvirtPlatformStatus) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(OvirtPlatformLoadBalancer) + **out = **in + } return } @@ -4114,12 +4372,12 @@ func (in *PlatformStatus) DeepCopyInto(out *PlatformStatus) { if in.Azure != nil { in, out := &in.Azure, &out.Azure *out = new(AzurePlatformStatus) - **out = **in + (*in).DeepCopyInto(*out) } if in.GCP != nil { in, out := &in.GCP, &out.GCP *out = new(GCPPlatformStatus) - **out = **in + (*in).DeepCopyInto(*out) } if in.BareMetal != nil { in, out := &in.BareMetal, &out.BareMetal @@ -4956,6 +5214,22 @@ func (in *VSpherePlatformFailureDomainSpec) DeepCopy() *VSpherePlatformFailureDo return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VSpherePlatformLoadBalancer) DeepCopyInto(out *VSpherePlatformLoadBalancer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSpherePlatformLoadBalancer. +func (in *VSpherePlatformLoadBalancer) DeepCopy() *VSpherePlatformLoadBalancer { + if in == nil { + return nil + } + out := new(VSpherePlatformLoadBalancer) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VSpherePlatformNodeNetworking) DeepCopyInto(out *VSpherePlatformNodeNetworking) { *out = *in @@ -5044,6 +5318,11 @@ func (in *VSpherePlatformStatus) DeepCopyInto(out *VSpherePlatformStatus) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.LoadBalancer != nil { + in, out := &in.LoadBalancer, &out.LoadBalancer + *out = new(VSpherePlatformLoadBalancer) + **out = **in + } return } diff --git a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go index 0f149c990..33ec92237 100644 --- a/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/config/v1/zz_generated.swagger_doc_generated.go @@ -266,9 +266,10 @@ func (StringSourceSpec) SwaggerDoc() map[string]string { } var map_APIServer = map[string]string{ - "": "APIServer holds configuration (like serving certificates, client CA and CORS domains) shared by all API servers in the system, among them especially kube-apiserver and openshift-apiserver. The canonical name of an instance is 'cluster'.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. They may not be overridden.", + "": "APIServer holds configuration (like serving certificates, client CA and CORS domains) shared by all API servers in the system, among them especially kube-apiserver and openshift-apiserver. The canonical name of an instance is 'cluster'.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
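// Note on the PlatformStatus hunk above: the Azure and GCP branches switch from
// the shallow **out = **in to (*in).DeepCopyInto(*out) because AzurePlatformStatus
// and GCPPlatformStatus now carry slice fields (resourceTags, resourceLabels); a
// plain struct assignment copies only the slice headers, so both structs would
// share one backing array. A small illustration with hypothetical values:
func exampleSliceAliasing() {
	a := GCPPlatformStatus{ResourceLabels: []GCPResourceLabel{{Key: "team", Value: "a"}}}
	shallow := a                          // slice header copied, backing array shared
	shallow.ResourceLabels[0].Value = "b" // mutates a.ResourceLabels[0] as well
	deep := a.DeepCopy()                  // re-allocates ResourceLabels
	deep.ResourceLabels[0].Value = "c"    // a is unaffected
}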
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", } func (APIServer) SwaggerDoc() map[string]string { @@ -284,7 +285,8 @@ func (APIServerEncryption) SwaggerDoc() map[string]string { } var map_APIServerList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (APIServerList) SwaggerDoc() map[string]string { @@ -342,9 +344,10 @@ func (AuditCustomRule) SwaggerDoc() map[string]string { } var map_Authentication = map[string]string{ - "": "Authentication specifies cluster-wide settings for authentication (like OAuth and webhook token authenticators). The canonical name of an instance is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. They may not be overridden.", + "": "Authentication specifies cluster-wide settings for authentication (like OAuth and webhook token authenticators). The canonical name of an instance is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", } func (Authentication) SwaggerDoc() map[string]string { @@ -352,7 +355,8 @@ func (Authentication) SwaggerDoc() map[string]string { } var map_AuthenticationList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (AuthenticationList) SwaggerDoc() map[string]string { @@ -398,8 +402,9 @@ func (WebhookTokenAuthenticator) SwaggerDoc() map[string]string { } var map_Build = map[string]string{ - "": "Build configures the behavior of OpenShift builds for the entire cluster. This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds.\n\nThe canonical name is \"cluster\"\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "Spec holds user-settable values for the build controller configuration", + "": "Build configures the behavior of OpenShift builds for the entire cluster. 
This includes default settings that can be overridden in BuildConfig objects, and overrides which are applied to all builds.\n\nThe canonical name is \"cluster\"\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec holds user-settable values for the build controller configuration", } func (Build) SwaggerDoc() map[string]string { @@ -419,7 +424,8 @@ func (BuildDefaults) SwaggerDoc() map[string]string { } var map_BuildList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (BuildList) SwaggerDoc() map[string]string { @@ -457,9 +463,10 @@ func (ImageLabel) SwaggerDoc() map[string]string { } var map_ClusterOperator = map[string]string{ - "": "ClusterOperator is the Custom Resource object which holds the current state of an operator. This object is used by operators to convey their state to the rest of the cluster.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec holds configuration that could apply to any operator.", - "status": "status holds the information about the state of an operator. It is consistent with status information across the Kubernetes ecosystem.", + "": "ClusterOperator is the Custom Resource object which holds the current state of an operator. This object is used by operators to convey their state to the rest of the cluster.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds configuration that could apply to any operator.", + "status": "status holds the information about the state of an operator. It is consistent with status information across the Kubernetes ecosystem.", } func (ClusterOperator) SwaggerDoc() map[string]string { @@ -467,7 +474,8 @@ func (ClusterOperator) SwaggerDoc() map[string]string { } var map_ClusterOperatorList = map[string]string{ - "": "ClusterOperatorList is a list of OperatorStatus resources.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "ClusterOperatorList is a list of OperatorStatus resources.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (ClusterOperatorList) SwaggerDoc() map[string]string { @@ -539,9 +547,10 @@ func (ClusterCondition) SwaggerDoc() map[string]string { } var map_ClusterVersion = map[string]string{ - "": "ClusterVersion is the configuration for the ClusterVersionOperator. 
This is where parameters related to automatic updates can be set.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec is the desired state of the cluster version - the operator will work to ensure that the desired version is applied to the cluster.", - "status": "status contains information about the available updates and any in-progress updates.", + "": "ClusterVersion is the configuration for the ClusterVersionOperator. This is where parameters related to automatic updates can be set.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the desired state of the cluster version - the operator will work to ensure that the desired version is applied to the cluster.", + "status": "status contains information about the available updates and any in-progress updates.", } func (ClusterVersion) SwaggerDoc() map[string]string { @@ -569,7 +578,8 @@ func (ClusterVersionCapabilitiesStatus) SwaggerDoc() map[string]string { } var map_ClusterVersionList = map[string]string{ - "": "ClusterVersionList is a list of ClusterVersion resources.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "ClusterVersionList is a list of ClusterVersion resources.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (ClusterVersionList) SwaggerDoc() map[string]string { @@ -691,9 +701,10 @@ func (UpdateHistory) SwaggerDoc() map[string]string { } var map_Console = map[string]string{ - "": "Console holds cluster-wide configuration for the web console, including the logout URL, and reports the public URL of the console. The canonical name is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. They may not be overridden.", + "": "Console holds cluster-wide configuration for the web console, including the logout URL, and reports the public URL of the console. The canonical name is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. 
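// Several of the resources documented here (ClusterVersion, Console, DNS,
// Infrastructure, ...) are cluster-scoped singletons whose canonical name is
// "cluster". A sketch of reading one with the openshift/client-go clientset this
// repository already vendors; the function and wiring are illustrative, not part
// of the patch.
import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"

	configclient "github.com/openshift/client-go/config/clientset/versioned"
)

func readClusterVersion(cfg *rest.Config) error {
	client, err := configclient.NewForConfig(cfg)
	if err != nil {
		return err
	}
	cv, err := client.ConfigV1().ClusterVersions().Get(context.TODO(), "cluster", metav1.GetOptions{})
	if err != nil {
		return err
	}
	_ = cv.Status // observed state; spec carries the user-settable desired state
	return nil
}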
They may not be overridden.", } func (Console) SwaggerDoc() map[string]string { @@ -710,7 +721,8 @@ func (ConsoleAuthentication) SwaggerDoc() map[string]string { } var map_ConsoleList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (ConsoleList) SwaggerDoc() map[string]string { @@ -734,10 +746,20 @@ func (ConsoleStatus) SwaggerDoc() map[string]string { return map_ConsoleStatus } +var map_AWSDNSSpec = map[string]string{ + "": "AWSDNSSpec contains DNS configuration specific to the Amazon Web Services cloud provider.", + "privateZoneIAMRole": "privateZoneIAMRole contains the ARN of an IAM role that should be assumed when performing operations on the cluster's private hosted zone specified in the cluster DNS config. When left empty, no role should be assumed.", +} + +func (AWSDNSSpec) SwaggerDoc() map[string]string { + return map_AWSDNSSpec +} + var map_DNS = map[string]string{ - "": "DNS holds cluster-wide information about DNS. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. They may not be overridden.", + "": "DNS holds cluster-wide information about DNS. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", } func (DNS) SwaggerDoc() map[string]string { @@ -745,17 +767,29 @@ func (DNS) SwaggerDoc() map[string]string { } var map_DNSList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (DNSList) SwaggerDoc() map[string]string { return map_DNSList } +var map_DNSPlatformSpec = map[string]string{ + "": "DNSPlatformSpec holds cloud-provider-specific configuration for DNS administration.", + "type": "type is the underlying infrastructure provider for the cluster. Allowed values: \"\", \"AWS\".\n\nIndividual components may not support all platforms, and must handle unrecognized platforms with best-effort defaults.", + "aws": "aws contains DNS configuration specific to the Amazon Web Services cloud provider.", +} + +func (DNSPlatformSpec) SwaggerDoc() map[string]string { + return map_DNSPlatformSpec +} + var map_DNSSpec = map[string]string{ "baseDomain": "baseDomain is the base domain of the cluster. 
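// The new DNSPlatformSpec/AWSDNSSpec fields documented above compose as follows.
// The base domain and role ARN are hypothetical; the type and constant names come
// from this package:
func exampleDNSSpec() DNSSpec {
	return DNSSpec{
		BaseDomain: "openshift.example.com",
		Platform: DNSPlatformSpec{
			Type: AWSPlatformType, // allowed values are "" and "AWS"
			AWS: &AWSDNSSpec{
				// role assumed for operations on the cluster's private hosted zone
				PrivateZoneIAMRole: "arn:aws:iam::123456789012:role/private-zone-example",
			},
		},
	}
}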
All managed DNS records will be sub-domains of this base.\n\nFor example, given the base domain `openshift.example.com`, an API server DNS record may be created for `cluster-api.openshift.example.com`.\n\nOnce set, this field cannot be changed.", "publicZone": "publicZone is the location where all the DNS records that are publicly accessible to the internet exist.\n\nIf this field is nil, no public records should be created.\n\nOnce set, this field cannot be changed.", "privateZone": "privateZone is the location where all the DNS records that are only available internally to the cluster exist.\n\nIf this field is nil, no private records should be created.\n\nOnce set, this field cannot be changed.", + "platform": "platform holds configuration specific to the underlying infrastructure provider for DNS. When omitted, this means the user has no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time.", } func (DNSSpec) SwaggerDoc() map[string]string { @@ -782,17 +816,37 @@ func (CustomFeatureGates) SwaggerDoc() map[string]string { } var map_FeatureGate = map[string]string{ - "": "Feature holds cluster-wide information about feature gates. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. They may not be overridden.", + "": "Feature holds cluster-wide information about feature gates. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", } func (FeatureGate) SwaggerDoc() map[string]string { return map_FeatureGate } +var map_FeatureGateAttributes = map[string]string{ + "name": "name is the name of the FeatureGate.", +} + +func (FeatureGateAttributes) SwaggerDoc() map[string]string { + return map_FeatureGateAttributes +} + +var map_FeatureGateDetails = map[string]string{ + "version": "version matches the version provided by the ClusterVersion and in the ClusterOperator.Status.Versions field.", + "enabled": "enabled is a list of all feature gates that are enabled in the cluster for the named version.", + "disabled": "disabled is a list of all feature gates that are disabled in the cluster for the named version.", +} + +func (FeatureGateDetails) SwaggerDoc() map[string]string { + return map_FeatureGateDetails +} + var map_FeatureGateList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (FeatureGateList) SwaggerDoc() map[string]string { @@ -808,10 +862,20 @@ func (FeatureGateSelection) SwaggerDoc() map[string]string { return map_FeatureGateSelection } +var map_FeatureGateStatus = map[string]string{ + "conditions": "conditions represent the observations of the current state. Known .status.conditions.type are: \"DeterminationDegraded\"", + "featureGates": "featureGates contains a list of enabled and disabled featureGates that are keyed by payloadVersion. Operators other than the CVO and cluster-config-operator must read the .status.featureGates, locate the version they are managing, find the enabled/disabled featuregates and make the operand and operator match. The enabled/disabled values for a particular version may change during the life of the cluster as various .spec.featureSet values are selected. Operators may choose to restart their processes to pick up these changes, but remembering past enable/disable lists is beyond the scope of this API and is the responsibility of individual operators. Only featureGates with .version in the ClusterVersion.status will be present in this list.", +} + +func (FeatureGateStatus) SwaggerDoc() map[string]string { + return map_FeatureGateStatus +} + var map_Image = map[string]string{ - "": "Image governs policies related to imagestream imports and runtime configuration for external registries. It allows cluster admins to configure which registries OpenShift is allowed to import images from, extra CA trust bundles for external registries, and policies to block or allow registry hostnames. When exposing OpenShift's image registry to the public, this also lets cluster admins specify the external hostname.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. They may not be overridden.", + "": "Image governs policies related to imagestream imports and runtime configuration for external registries. It allows cluster admins to configure which registries OpenShift is allowed to import images from, extra CA trust bundles for external registries, and policies to block or allow registry hostnames. When exposing OpenShift's image registry to the public, this also lets cluster admins specify the external hostname.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", } func (Image) SwaggerDoc() map[string]string { @@ -819,7 +883,8 @@ func (Image) SwaggerDoc() map[string]string { } var map_ImageList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata.
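// The featureGates documentation above prescribes a lookup keyed by version: an
// operator locates the FeatureGateDetails entry matching the version it manages
// and keys its behavior off the enabled/disabled lists. A sketch; the helper name
// is illustrative:
func gatesForVersion(st *FeatureGateStatus, version string) (map[FeatureGateName]bool, bool) {
	for i := range st.FeatureGates {
		d := &st.FeatureGates[i]
		if d.Version != version {
			continue
		}
		gates := make(map[FeatureGateName]bool, len(d.Enabled)+len(d.Disabled))
		for _, g := range d.Enabled {
			gates[g.Name] = true
		}
		for _, g := range d.Disabled {
			gates[g.Name] = false
		}
		return gates, true
	}
	return nil, false // version not present in ClusterVersion.status yet
}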
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (ImageList) SwaggerDoc() map[string]string { @@ -869,8 +934,9 @@ func (RegistrySources) SwaggerDoc() map[string]string { } var map_ImageContentPolicy = map[string]string{ - "": "ImageContentPolicy holds cluster-wide information about how to handle registry mirror rules. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec holds user settable values for configuration", + "": "ImageContentPolicy holds cluster-wide information about how to handle registry mirror rules. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", } func (ImageContentPolicy) SwaggerDoc() map[string]string { @@ -878,7 +944,8 @@ func (ImageContentPolicy) SwaggerDoc() map[string]string { } var map_ImageContentPolicyList = map[string]string{ - "": "ImageContentPolicyList lists the items in the ImageContentPolicy CRD.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "ImageContentPolicyList lists the items in the ImageContentPolicy CRD.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (ImageContentPolicyList) SwaggerDoc() map[string]string { @@ -906,9 +973,10 @@ func (RepositoryDigestMirrors) SwaggerDoc() map[string]string { } var map_ImageDigestMirrorSet = map[string]string{ - "": "ImageDigestMirrorSet holds cluster-wide information about how to handle registry mirror rules on using digest pull specification. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec holds user settable values for configuration", - "status": "status contains the observed state of the resource.", + "": "ImageDigestMirrorSet holds cluster-wide information about how to handle registry mirror rules on using digest pull specification. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status contains the observed state of the resource.", } func (ImageDigestMirrorSet) SwaggerDoc() map[string]string { @@ -916,7 +984,8 @@ func (ImageDigestMirrorSet) SwaggerDoc() map[string]string { } var map_ImageDigestMirrorSetList = map[string]string{ - "": "ImageDigestMirrorSetList lists the items in the ImageDigestMirrorSet CRD.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "ImageDigestMirrorSetList lists the items in the ImageDigestMirrorSet CRD.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (ImageDigestMirrorSetList) SwaggerDoc() map[string]string { @@ -944,9 +1013,10 @@ func (ImageDigestMirrors) SwaggerDoc() map[string]string { } var map_ImageTagMirrorSet = map[string]string{ - "": "ImageTagMirrorSet holds cluster-wide information about how to handle registry mirror rules on using tag pull specification. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec holds user settable values for configuration", - "status": "status contains the observed state of the resource.", + "": "ImageTagMirrorSet holds cluster-wide information about how to handle registry mirror rules on using tag pull specification. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status contains the observed state of the resource.", } func (ImageTagMirrorSet) SwaggerDoc() map[string]string { @@ -954,7 +1024,8 @@ func (ImageTagMirrorSet) SwaggerDoc() map[string]string { } var map_ImageTagMirrorSetList = map[string]string{ - "": "ImageTagMirrorSetList lists the items in the ImageTagMirrorSet CRD.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "ImageTagMirrorSetList lists the items in the ImageTagMirrorSet CRD.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (ImageTagMirrorSetList) SwaggerDoc() map[string]string { @@ -1064,12 +1135,32 @@ var map_AzurePlatformStatus = map[string]string{ "networkResourceGroupName": "networkResourceGroupName is the Resource Group for network resources like the Virtual Network and Subnets used by the cluster. 
If empty, the value is the same as ResourceGroupName.", "cloudName": "cloudName is the name of the Azure cloud environment which can be used to configure the Azure SDK with the appropriate Azure API endpoints. If empty, the value is equal to `AzurePublicCloud`.", "armEndpoint": "armEndpoint specifies a URL to use for resource management in non-sovereign clouds such as Azure Stack.", + "resourceTags": "resourceTags is a list of additional tags to apply to Azure resources created for the cluster. See https://docs.microsoft.com/en-us/rest/api/resources/tags for information on tagging Azure resources. Due to limitations on Automation, Content Delivery Network, DNS Azure resources, a maximum of 15 tags may be applied. OpenShift reserves 5 tags for internal use, allowing 10 tags for user configuration.", } func (AzurePlatformStatus) SwaggerDoc() map[string]string { return map_AzurePlatformStatus } +var map_AzureResourceTag = map[string]string{ + "": "AzureResourceTag is a tag to apply to Azure resources created for the cluster.", + "key": "key is the key part of the tag. A tag key can have a maximum of 128 characters and cannot be empty. Key must begin with a letter, end with a letter, number or underscore, and must contain only alphanumeric characters and the following special characters `_ . -`.", + "value": "value is the value part of the tag. A tag value can have a maximum of 256 characters and cannot be empty. Value must contain only alphanumeric characters and the following special characters `_ + , - . / : ; < = > ? @`.", +} + +func (AzureResourceTag) SwaggerDoc() map[string]string { + return map_AzureResourceTag +} + +var map_BareMetalPlatformLoadBalancer = map[string]string{ + "": "BareMetalPlatformLoadBalancer defines the load balancer used by the cluster on BareMetal platform.", + "type": "type defines the type of load balancer used by the cluster on BareMetal platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault.", +} + +func (BareMetalPlatformLoadBalancer) SwaggerDoc() map[string]string { + return map_BareMetalPlatformLoadBalancer +} + var map_BareMetalPlatformSpec = map[string]string{ "": "BareMetalPlatformSpec holds the desired state of the BareMetal infrastructure provider. This only includes fields that can be modified in the cluster.", } @@ -1085,19 +1176,20 @@ var map_BareMetalPlatformStatus = map[string]string{ "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.\n\nDeprecated: Use IngressIPs instead.", "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.", "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for BareMetal deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.", + "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.", } func (BareMetalPlatformStatus) SwaggerDoc() map[string]string { return map_BareMetalPlatformStatus }
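// Each new *PlatformLoadBalancer type carries the same contract: the API/Ingress
// static pods defined in the machine config operator are deployed only for the
// OpenShiftManagedDefault type. A sketch against the BareMetal status, assuming
// the shared LoadBalancerTypeOpenShiftManagedDefault constant defined alongside
// these types upstream:
func deployManagedLoadBalancer(st *BareMetalPlatformStatus) bool {
	if st.LoadBalancer == nil || st.LoadBalancer.Type == "" {
		return true // omitted: the platform default is OpenShiftManagedDefault
	}
	return st.LoadBalancer.Type == LoadBalancerTypeOpenShiftManagedDefault
}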
-var map_CloudControllerManagerSpec = map[string]string{ - "": "CloudControllerManagerSpec holds Cloud Controller Manager (a.k.a. CCM or CPI) related settings", - "state": "state determines whether or not an external Cloud Controller Manager is expected to be installed within the cluster. https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager\n\nWhen set to \"External\", new nodes will be tainted as uninitialized when created, preventing them from running workloads until they are initialized by the cloud controller manager. When omitted or set to \"None\", new nodes will be not tainted and no extra initialization from the cloud controller manager is expected.", +var map_CloudControllerManagerStatus = map[string]string{ + "": "CloudControllerManagerStatus holds the state of Cloud Controller Manager (a.k.a. CCM or CPI) related settings", + "state": "state determines whether or not an external Cloud Controller Manager is expected to be installed within the cluster. https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/#running-cloud-controller-manager\n\nValid values are \"External\", \"None\" and omitted. When set to \"External\", new nodes will be tainted as uninitialized when created, preventing them from running workloads until they are initialized by the cloud controller manager. When omitted or set to \"None\", new nodes will not be tainted and no extra initialization from the cloud controller manager is expected.", } -func (CloudControllerManagerSpec) SwaggerDoc() map[string]string { - return map_CloudControllerManagerSpec +func (CloudControllerManagerStatus) SwaggerDoc() map[string]string { + return map_CloudControllerManagerStatus } var map_EquinixMetalPlatformSpec = map[string]string{ @@ -1119,9 +1211,8 @@ func (EquinixMetalPlatformStatus) SwaggerDoc() map[string]string { } var map_ExternalPlatformSpec = map[string]string{ - "": "ExternalPlatformSpec holds the desired state for the generic External infrastructure provider.", - "platformName": "PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. This field is solely for informational and reporting purposes and is not expected to be used for decision-making.", - "cloudControllerManager": "CloudControllerManager contains settings specific to the external Cloud Controller Manager (a.k.a. CCM or CPI)", + "": "ExternalPlatformSpec holds the desired state for the generic External infrastructure provider.", + "platformName": "PlatformName holds the arbitrary string representing the infrastructure provider name, expected to be set at the installation time. This field is solely for informational and reporting purposes and is not expected to be used for decision-making.", } func (ExternalPlatformSpec) SwaggerDoc() map[string]string {
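// With the rename from CloudControllerManagerSpec to CloudControllerManagerStatus,
// the CCM state is read from the External platform status rather than the spec. A
// sketch, assuming the CloudControllerManagerExternal ("External") constant that
// accompanies these types upstream:
func usesExternalCCM(infra *Infrastructure) bool {
	ps := infra.Status.PlatformStatus
	return ps != nil && ps.External != nil &&
		ps.External.CloudControllerManager.State == CloudControllerManagerExternal
}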
@@ -1129,7 +1220,8 @@ func (ExternalPlatformSpec) SwaggerDoc() map[string]string { } var map_ExternalPlatformStatus = map[string]string{ - "": "ExternalPlatformStatus holds the current status of the generic External infrastructure provider.", + "": "ExternalPlatformStatus holds the current status of the generic External infrastructure provider.", + "cloudControllerManager": "cloudControllerManager contains settings specific to the external Cloud Controller Manager (a.k.a. CCM or CPI). When omitted, new nodes will not be tainted and no extra initialization from the cloud controller manager is expected.", } func (ExternalPlatformStatus) SwaggerDoc() map[string]string { @@ -1145,15 +1237,38 @@ func (GCPPlatformSpec) SwaggerDoc() map[string]string { } var map_GCPPlatformStatus = map[string]string{ - "": "GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider.", - "projectID": "resourceGroupName is the Project ID for new GCP resources created for the cluster.", - "region": "region holds the region for new GCP resources created for the cluster.", + "": "GCPPlatformStatus holds the current status of the Google Cloud Platform infrastructure provider.", + "projectID": "projectID is the Project ID for new GCP resources created for the cluster.", + "region": "region holds the region for new GCP resources created for the cluster.", + "resourceLabels": "resourceLabels is a list of additional labels to apply to GCP resources created for the cluster. See https://cloud.google.com/compute/docs/labeling-resources for information on labeling GCP resources. GCP supports a maximum of 64 labels per resource. OpenShift reserves 32 labels for internal use, allowing 32 labels for user configuration.", + "resourceTags": "resourceTags is a list of additional tags to apply to GCP resources created for the cluster. See https://cloud.google.com/resource-manager/docs/tags/tags-overview for information on tagging GCP resources. GCP supports a maximum of 50 tags per resource.", } func (GCPPlatformStatus) SwaggerDoc() map[string]string { return map_GCPPlatformStatus } +var map_GCPResourceLabel = map[string]string{ + "": "GCPResourceLabel is a label to apply to GCP resources created for the cluster.", + "key": "key is the key part of the label. A label key can have a maximum of 63 characters and cannot be empty. Label key must begin with a lowercase letter, and must contain only lowercase letters, numeric characters, and the following special characters `_-`. Label key must not have the reserved prefixes `kubernetes-io` and `openshift-io`.", + "value": "value is the value part of the label. A label value can have a maximum of 63 characters and cannot be empty. Value must contain only lowercase letters, numeric characters, and the following special characters `_-`.", +} + +func (GCPResourceLabel) SwaggerDoc() map[string]string { + return map_GCPResourceLabel +} + +var map_GCPResourceTag = map[string]string{ + "": "GCPResourceTag is a tag to apply to GCP resources created for the cluster.", + "parentID": "parentID is the ID of the hierarchical resource where the tags are defined, e.g. at the Organization or the Project level.
To find the Organization or Project ID refer to the following pages: https://cloud.google.com/resource-manager/docs/creating-managing-organization#retrieving_your_organization_id, https://cloud.google.com/resource-manager/docs/creating-managing-projects#identifying_projects. An OrganizationID must consist of decimal numbers, and cannot have leading zeroes. A ProjectID must be 6 to 30 characters in length, can only contain lowercase letters, numbers, and hyphens, must start with a letter, and cannot end with a hyphen.", + "key": "key is the key part of the tag. A tag key can have a maximum of 63 characters and cannot be empty. Tag key must begin and end with an alphanumeric character, and must contain only uppercase, lowercase alphanumeric characters, and the following special characters `._-`.", + "value": "value is the value part of the tag. A tag value can have a maximum of 63 characters and cannot be empty. Tag value must begin and end with an alphanumeric character, and must contain only uppercase, lowercase alphanumeric characters, and the following special characters `_-.@%=+:,*#&(){}[]` and spaces.", +} + +func (GCPResourceTag) SwaggerDoc() map[string]string { + return map_GCPResourceTag +} + var map_IBMCloudPlatformSpec = map[string]string{ "": "IBMCloudPlatformSpec holds the desired state of the IBMCloud infrastructure provider. This only includes fields that can be modified in the cluster.", } @@ -1176,9 +1291,10 @@ func (IBMCloudPlatformStatus) SwaggerDoc() map[string]string { } var map_Infrastructure = map[string]string{ - "": "Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. They may not be overridden.", + "": "Infrastructure holds cluster-wide information about Infrastructure. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", } func (Infrastructure) SwaggerDoc() map[string]string { @@ -1186,7 +1302,8 @@ func (Infrastructure) SwaggerDoc() map[string]string { } var map_InfrastructureList = map[string]string{ - "": "InfrastructureList is\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "InfrastructureList is a list of Infrastructure resources.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (InfrastructureList) SwaggerDoc() map[string]string {
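// A client-side check derived from the GCPResourceLabel key rules quoted above;
// the regex is inferred from the prose, not taken from upstream validation code:
import (
	"regexp"
	"strings"
)

var gcpLabelKeyRE = regexp.MustCompile(`^[a-z][a-z0-9_-]{0,62}$`)

func validGCPLabelKey(k string) bool {
	if !gcpLabelKeyRE.MatchString(k) {
		return false // lowercase start, only [a-z0-9_-], 1-63 characters
	}
	for _, reserved := range []string{"kubernetes-io", "openshift-io"} {
		if strings.HasPrefix(k, reserved) {
			return false // reserved prefixes are rejected
		}
	}
	return true
}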
@@ -1213,6 +1330,7 @@ var map_InfrastructureStatus = map[string]string{ "apiServerInternalURI": "apiServerInternalURL is a valid URI with scheme 'https', address and optionally a port (defaulting to 443). apiServerInternalURL can be used by components like kubelets to contact the Kubernetes API server using the infrastructure provider rather than Kubernetes networking.", "controlPlaneTopology": "controlPlaneTopology expresses the expectations for operands that normally run on control nodes. The default is 'HighlyAvailable', which represents the behavior operators have in a \"normal\" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation. The 'External' mode indicates that the control plane is hosted externally to the cluster and that its components are not visible within the cluster.", "infrastructureTopology": "infrastructureTopology expresses the expectations for infrastructure services that do not run on control plane nodes, usually indicated by a node selector for a `role` value other than `master`. The default is 'HighlyAvailable', which represents the behavior operators have in a \"normal\" cluster. The 'SingleReplica' mode will be used in single-node deployments and the operators should not configure the operand for highly-available operation. NOTE: External topology mode is not applicable for this field.", + "cpuPartitioning": "cpuPartitioning expresses if CPU partitioning is a currently enabled feature in the cluster. CPU Partitioning means that this cluster can support partitioning workloads to specific CPU Sets. Valid values are \"None\" and \"AllNodes\". When omitted, the default value is \"None\". The default value of \"None\" indicates that no nodes will be set up with CPU partitioning. The \"AllNodes\" value indicates that all nodes have been set up with CPU partitioning, and can then be further configured via the PerformanceProfile API.", } func (InfrastructureStatus) SwaggerDoc() map[string]string { @@ -1237,6 +1355,15 @@ func (KubevirtPlatformStatus) SwaggerDoc() map[string]string { return map_KubevirtPlatformStatus } +var map_NutanixPlatformLoadBalancer = map[string]string{ + "": "NutanixPlatformLoadBalancer defines the load balancer used by the cluster on Nutanix platform.", + "type": "type defines the type of load balancer used by the cluster on Nutanix platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault.", +} + +func (NutanixPlatformLoadBalancer) SwaggerDoc() map[string]string { + return map_NutanixPlatformLoadBalancer +} + var map_NutanixPlatformSpec = map[string]string{ "": "NutanixPlatformSpec holds the desired state of the Nutanix infrastructure provider. This only includes fields that can be modified in the cluster.", "prismCentral": "prismCentral holds the endpoint address and port to access the Nutanix Prism Central. When a cluster-wide proxy is installed, by default, this endpoint will be accessed via the proxy.
Should you wish for communication with this endpoint not to be proxied, please add the endpoint to the proxy spec.noProxy list.", @@ -1253,6 +1380,7 @@ var map_NutanixPlatformStatus = map[string]string{ "apiServerInternalIPs": "apiServerInternalIPs are the IP addresses to contact the Kubernetes API server that can be used by components inside the cluster, like kubelets using the infrastructure rather than Kubernetes networking. These are the IPs for a self-hosted load balancer in front of the API servers. In dual stack clusters this list contains two IPs otherwise only one.", "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.\n\nDeprecated: Use IngressIPs instead.", "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.", + "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.", } func (NutanixPlatformStatus) SwaggerDoc() map[string]string { @@ -1279,6 +1407,15 @@ func (NutanixPrismEndpoint) SwaggerDoc() map[string]string { return map_NutanixPrismEndpoint } +var map_OpenStackPlatformLoadBalancer = map[string]string{ + "": "OpenStackPlatformLoadBalancer defines the load balancer used by the cluster on OpenStack platform.", + "type": "type defines the type of load balancer used by the cluster on OpenStack platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault.", +} + +func (OpenStackPlatformLoadBalancer) SwaggerDoc() map[string]string { + return map_OpenStackPlatformLoadBalancer +} + var map_OpenStackPlatformSpec = map[string]string{ "": "OpenStackPlatformSpec holds the desired state of the OpenStack infrastructure provider. This only includes fields that can be modified in the cluster.", } @@ -1295,12 +1432,22 @@ var map_OpenStackPlatformStatus = map[string]string{ "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.\n\nDeprecated: Use IngressIPs instead.", "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.", "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for OpenStack deployments. 
In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.", + "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.", } func (OpenStackPlatformStatus) SwaggerDoc() map[string]string { return map_OpenStackPlatformStatus } +var map_OvirtPlatformLoadBalancer = map[string]string{ + "": "OvirtPlatformLoadBalancer defines the load balancer used by the cluster on Ovirt platform.", + "type": "type defines the type of load balancer used by the cluster on Ovirt platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault.", +} + +func (OvirtPlatformLoadBalancer) SwaggerDoc() map[string]string { + return map_OvirtPlatformLoadBalancer +} + var map_OvirtPlatformSpec = map[string]string{ "": "OvirtPlatformSpec holds the desired state of the oVirt infrastructure provider. This only includes fields that can be modified in the cluster.", } @@ -1316,6 +1463,7 @@ var map_OvirtPlatformStatus = map[string]string{ "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.\n\nDeprecated: Use IngressIPs instead.", "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.", "nodeDNSIP": "deprecated: as of 4.6, this field is no longer set or honored. It will be removed in a future release.", + "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.", } func (OvirtPlatformStatus) SwaggerDoc() map[string]string { @@ -1381,6 +1529,7 @@ var map_PowerVSPlatformStatus = map[string]string{ "": "PowerVSPlatformStatus holds the current status of the IBM Power Systems Virtual Servers infrastructure provider.", "region": "region holds the default Power VS region for new Power VS resources created by the cluster.", "zone": "zone holds the default zone for the new Power VS resources created by the cluster. Note: Currently only single-zone OCP clusters are supported", + "resourceGroup": "resourceGroup is the resource group name for new IBMCloud resources created for a cluster. The resource group specified here will be used by cluster-image-registry-operator to set up a COS Instance in IBMCloud for the cluster registry. More about resource groups can be found here: https://cloud.ibm.com/docs/account?topic=account-rgs.
When omitted, the image registry operator won't be able to configure storage, which results in the image registry cluster operator not being in an available state.", "serviceEndpoints": "serviceEndpoints is a list of custom endpoints which will override the default service endpoints of a Power VS service.", "cisInstanceCRN": "CISInstanceCRN is the CRN of the Cloud Internet Services instance managing the DNS zone for the cluster's base domain", "dnsInstanceCRN": "DNSInstanceCRN is the CRN of the DNS Services instance managing the DNS zone for the cluster's base domain", @@ -1413,6 +1562,15 @@ func (VSpherePlatformFailureDomainSpec) SwaggerDoc() map[string]string { return map_VSpherePlatformFailureDomainSpec } +var map_VSpherePlatformLoadBalancer = map[string]string{ + "": "VSpherePlatformLoadBalancer defines the load balancer used by the cluster on VSphere platform.", + "type": "type defines the type of load balancer used by the cluster on VSphere platform which can be a user-managed or openshift-managed load balancer that is to be used for the OpenShift API and Ingress endpoints. When set to OpenShiftManagedDefault the static pods in charge of API and Ingress traffic load-balancing defined in the machine config operator will be deployed. When set to UserManaged these static pods will not be deployed and it is expected that the load balancer is configured out of band by the deployer. When omitted, this means no opinion and the platform is left to choose a reasonable default. The default value is OpenShiftManagedDefault.", +} + +func (VSpherePlatformLoadBalancer) SwaggerDoc() map[string]string { + return map_VSpherePlatformLoadBalancer +} + var map_VSpherePlatformNodeNetworking = map[string]string{ "": "VSpherePlatformNodeNetworking holds the external and internal node networking spec.", "external": "external represents the network configuration of the node that is externally routable.", @@ -1452,6 +1610,7 @@ var map_VSpherePlatformStatus = map[string]string{ "ingressIP": "ingressIP is an external IP which routes to the default ingress controller. The IP is a suitable target of a wildcard DNS record used to resolve default route host names.\n\nDeprecated: Use IngressIPs instead.", "ingressIPs": "ingressIPs are the external IPs which route to the default ingress controller. The IPs are suitable targets of a wildcard DNS record used to resolve default route host names. In dual stack clusters this list contains two IPs otherwise only one.", "nodeDNSIP": "nodeDNSIP is the IP address for the internal DNS used by the nodes. Unlike the one managed by the DNS operator, `NodeDNSIP` provides name resolution for the nodes themselves. There is no DNS-as-a-service for vSphere deployments. In order to minimize necessary changes to the datacenter DNS, a DNS service is hosted as a static pod to serve those hostnames to the nodes in the cluster.", + "loadBalancer": "loadBalancer defines how the load balancer used by the cluster is configured.", } func (VSpherePlatformStatus) SwaggerDoc() map[string]string { @@ -1520,9 +1679,10 @@ func (ComponentRouteStatus) SwaggerDoc() map[string]string { } var map_Ingress = map[string]string{ - "": "Ingress holds cluster-wide information about ingress, including the default ingress domain used for routes. 
The canonical name is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. They may not be overridden.", + "": "Ingress holds cluster-wide information about ingress, including the default ingress domain used for routes. The canonical name is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", } func (Ingress) SwaggerDoc() map[string]string { @@ -1530,7 +1690,8 @@ func (Ingress) SwaggerDoc() map[string]string { } var map_IngressList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (IngressList) SwaggerDoc() map[string]string { @@ -1627,9 +1788,10 @@ func (MTUMigrationValues) SwaggerDoc() map[string]string { } var map_Network = map[string]string{ - "": "Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. Please view network.spec for an explanation on what applies when configuring this resource.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec holds user settable values for configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.", - "status": "status holds observed values from the cluster. They may not be overridden.", + "": "Network holds cluster-wide information about Network. The canonical name is `cluster`. It is used to configure the desired network configuration, such as: IP address pools for services/pod IPs, network plugin, etc. Please view network.spec for an explanation on what applies when configuring this resource.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration. As a general rule, this SHOULD NOT be read directly. Instead, you should consume the NetworkStatus, as it indicates the currently deployed configuration. Currently, most spec fields are immutable after installation. Please view the individual ones for further details on each.", + "status": "status holds observed values from the cluster. 
They may not be overridden.", } func (Network) SwaggerDoc() map[string]string { @@ -1637,7 +1799,8 @@ func (Network) SwaggerDoc() map[string]string { } var map_NetworkList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (NetworkList) SwaggerDoc() map[string]string { @@ -1681,9 +1844,10 @@ func (NetworkStatus) SwaggerDoc() map[string]string { } var map_Node = map[string]string{ - "": "Node holds cluster-wide information about node specific features.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values.", + "": "Node holds cluster-wide information about node specific features.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values.", } func (Node) SwaggerDoc() map[string]string { @@ -1691,7 +1855,8 @@ func (Node) SwaggerDoc() map[string]string { } var map_NodeList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (NodeList) SwaggerDoc() map[string]string { @@ -1825,9 +1990,10 @@ func (LDAPIdentityProvider) SwaggerDoc() map[string]string { } var map_OAuth = map[string]string{ - "": "OAuth holds cluster-wide information about OAuth. The canonical name is `cluster`. It is used to configure the integrated OAuth server. This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. They may not be overridden.", + "": "OAuth holds cluster-wide information about OAuth. The canonical name is `cluster`. It is used to configure the integrated OAuth server. This configuration is only honored when the top level Authentication config has type set to IntegratedOAuth.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. 
They may not be overridden.", } func (OAuth) SwaggerDoc() map[string]string { @@ -1835,7 +2001,8 @@ func (OAuth) SwaggerDoc() map[string]string { } var map_OAuthList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (OAuthList) SwaggerDoc() map[string]string { @@ -1959,7 +2126,8 @@ func (HubSourceStatus) SwaggerDoc() map[string]string { } var map_OperatorHub = map[string]string{ - "": "OperatorHub is the Schema for the operatorhubs API. It can be used to change the state of the default hub sources for OperatorHub on the cluster from enabled to disabled and vice versa.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "OperatorHub is the Schema for the operatorhubs API. It can be used to change the state of the default hub sources for OperatorHub on the cluster from enabled to disabled and vice versa.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (OperatorHub) SwaggerDoc() map[string]string { @@ -1967,7 +2135,8 @@ func (OperatorHub) SwaggerDoc() map[string]string { } var map_OperatorHubList = map[string]string{ - "": "OperatorHubList contains a list of OperatorHub\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "OperatorHubList contains a list of OperatorHub\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (OperatorHubList) SwaggerDoc() map[string]string { @@ -1994,9 +2163,10 @@ func (OperatorHubStatus) SwaggerDoc() map[string]string { } var map_Project = map[string]string{ - "": "Project holds cluster-wide information about Project. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. They may not be overridden.", + "": "Project holds cluster-wide information about Project. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. 
They may not be overridden.", } func (Project) SwaggerDoc() map[string]string { @@ -2004,7 +2174,8 @@ func (Project) SwaggerDoc() map[string]string { } var map_ProjectList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (ProjectList) SwaggerDoc() map[string]string { @@ -2031,9 +2202,10 @@ func (TemplateReference) SwaggerDoc() map[string]string { } var map_Proxy = map[string]string{ - "": "Proxy holds cluster-wide information on how to configure default proxies for the cluster. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "Spec holds user-settable values for the proxy configuration", - "status": "status holds observed values from the cluster. They may not be overridden.", + "": "Proxy holds cluster-wide information on how to configure default proxies for the cluster. The canonical name is `cluster`\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec holds user-settable values for the proxy configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", } func (Proxy) SwaggerDoc() map[string]string { @@ -2041,7 +2213,8 @@ func (Proxy) SwaggerDoc() map[string]string { } var map_ProxyList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (ProxyList) SwaggerDoc() map[string]string { @@ -2073,9 +2246,10 @@ func (ProxyStatus) SwaggerDoc() map[string]string { } var map_Scheduler = map[string]string{ - "": "Scheduler holds cluster-wide config information to run the Kubernetes Scheduler and influence its placement decisions. The canonical name for this config is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. They may not be overridden.", + "": "Scheduler holds cluster-wide config information to run the Kubernetes Scheduler and influence its placement decisions. The canonical name for this config is `cluster`.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", } func (Scheduler) SwaggerDoc() map[string]string { @@ -2083,7 +2257,8 @@ func (Scheduler) SwaggerDoc() map[string]string { } var map_SchedulerList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (SchedulerList) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_backup-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_backup-TechPreviewNoUpgrade.crd.yaml new file mode 100644 index 000000000..0ee3bdea4 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_backup-TechPreviewNoUpgrade.crd.yaml @@ -0,0 +1,100 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1482 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: TechPreviewNoUpgrade + name: backups.config.openshift.io +spec: + group: config.openshift.io + names: + kind: Backup + listKind: BackupList + plural: backups + singular: backup + scope: Cluster + versions: + - name: v1alpha1 + served: true + storage: true + subresources: + status: {} + schema: + openAPIV3Schema: + description: "Backup provides configuration for performing backups of the openshift cluster. \n Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + required: + - etcd + properties: + etcd: + description: etcd specifies the configuration for periodic backups of the etcd cluster + type: object + properties: + pvcName: + description: PVCName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the etcd backup files would be saved The PVC itself must always be created in the "openshift-etcd" namespace If the PVC is left unspecified "" then the platform will choose a reasonable default location to save the backup. In the future this would be backups saved across the control-plane master nodes. + type: string + retentionPolicy: + description: RetentionPolicy defines the retention policy for retaining and deleting existing backups. + type: object + required: + - retentionType + properties: + retentionNumber: + description: RetentionNumber configures the retention policy based on the number of backups + type: object + required: + - maxNumberOfBackups + properties: + maxNumberOfBackups: + description: MaxNumberOfBackups defines the maximum number of backups to retain. If the existing number of backups saved is equal to MaxNumberOfBackups then the oldest backup will be removed before a new backup is initiated. + type: integer + minimum: 1 + retentionSize: + description: RetentionSize configures the retention policy based on the size of backups + type: object + required: + - maxSizeOfBackupsGb + properties: + maxSizeOfBackupsGb: + description: MaxSizeOfBackupsGb defines the total size in GB of backups to retain. If the current total size of backups exceeds MaxSizeOfBackupsGb then the oldest backup will be removed before a new backup is initiated. + type: integer + minimum: 1 + retentionType: + description: RetentionType sets the type of retention policy. Currently, the only valid policies are retention by number of backups (RetentionNumber), by the size of backups (RetentionSize). More policies or types may be added in the future. Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. The current default is RetentionNumber with 15 backups kept. + type: string + allOf: + - enum: + - RetentionNumber + - RetentionSize + - enum: + - "" + - RetentionNumber + - RetentionSize + schedule: + description: 'Schedule defines the recurring backup schedule in Cron format every 2 hours: 0 */2 * * * every day at 3am: 0 3 * * * Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. The current default is "no backups", but will change in the future.'
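The schedule format described above is standard five-field cron plus @-macros. A minimal sketch of client-side pre-validation, assuming the robfig/cron library (an assumption for illustration; nothing in this CRD requires it), approximating what the pattern below enforces server-side:

package example

import "github.com/robfig/cron" // assumed dependency, illustration only

// validBackupSchedule reports whether s parses as a standard five-field
// cron spec ("minute hour day-of-month month day-of-week") or a macro
// such as @hourly.
func validBackupSchedule(s string) bool {
	_, err := cron.ParseStandard(s)
	return err == nil
}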
+ type: string + pattern: ^(@(annually|yearly|monthly|weekly|daily|hourly))|(\*|(?:\*|(?:[0-9]|(?:[1-5][0-9])))\/(?:[0-9]|(?:[1-5][0-9]))|(?:[0-9]|(?:[1-5][0-9]))(?:(?:\-[0-9]|\-(?:[1-5][0-9]))?|(?:\,(?:[0-9]|(?:[1-5][0-9])))*)) (\*|(?:\*|(?:\*|(?:[0-9]|1[0-9]|2[0-3])))\/(?:[0-9]|1[0-9]|2[0-3])|(?:[0-9]|1[0-9]|2[0-3])(?:(?:\-(?:[0-9]|1[0-9]|2[0-3]))?|(?:\,(?:[0-9]|1[0-9]|2[0-3]))*)) (\*|(?:[1-9]|(?:[12][0-9])|3[01])(?:(?:\-(?:[1-9]|(?:[12][0-9])|3[01]))?|(?:\,(?:[1-9]|(?:[12][0-9])|3[01]))*)) (\*|(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC)(?:(?:\-(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC))?|(?:\,(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC))*)) (\*|(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT)(?:(?:\-(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT))?|(?:\,(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT))*))$ + timeZone: + description: The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. See https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones + type: string + pattern: ^([A-Za-z_]+([+-]*0)*|[A-Za-z_]+(\/[A-Za-z_]+){1,2})(\/GMT[+-]\d{1,2})?$ + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object diff --git a/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_insightsdatagather.crd.yaml b/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_insightsdatagather.crd.yaml new file mode 100644 index 000000000..8120185e2 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/0000_10_config-operator_01_insightsdatagather.crd.yaml @@ -0,0 +1,62 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1245 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: TechPreviewNoUpgrade + name: insightsdatagathers.config.openshift.io +spec: + group: config.openshift.io + names: + kind: InsightsDataGather + listKind: InsightsDataGatherList + plural: insightsdatagathers + singular: insightsdatagather + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: "InsightsDataGather provides data gather configuration options for the Insights Operator. \n Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + gatherConfig: + description: gatherConfig spec attribute includes all the configuration options related to gathering of the Insights data and its uploading to the ingress. + type: object + properties: + dataPolicy: + description: dataPolicy allows user to enable additional global obfuscation of the IP addresses and base domain in the Insights archive data. Valid values are "None" and "ObfuscateNetworking". When set to None the data is not obfuscated. When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is None. + type: string + enum: + - "" + - None + - ObfuscateNetworking + disabledGatherers: + description: 'disabledGatherers is a list of gatherers to be excluded from the gathering. All the gatherers can be disabled by providing "all" value. If all the gatherers are disabled, the Insights operator does not gather any data. The particular gatherers IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. Run the following command to get the names of last active gatherers: "oc get insightsoperators.operator.openshift.io cluster -o json | jq ''.status.gatherStatus.gatherers[].name''" An example of disabling gatherers looks like this: `disabledGatherers: ["clusterconfig/machine_configs", "workloads/workload_info"]`' + type: array + items: + type: string + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/Makefile b/vendor/github.com/openshift/api/config/v1alpha1/Makefile new file mode 100644 index 000000000..e32ad5d9e --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="config.openshift.io/v1alpha1" diff --git a/vendor/github.com/openshift/api/config/v1alpha1/doc.go b/vendor/github.com/openshift/api/config/v1alpha1/doc.go new file mode 100644 index 000000000..20d448573 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +kubebuilder:validation:Optional +// +groupName=config.openshift.io +// Package v1alpha1 is the v1alpha1 version of the API. 
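The register.go file added below exposes an Install helper that wires these kinds into a runtime scheme. A short, illustrative usage sketch (the function name newScheme is hypothetical):

package example

import (
	configv1alpha1 "github.com/openshift/api/config/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
)

// newScheme registers Backup, InsightsDataGather and their list kinds so
// that clients and codecs can round-trip config.openshift.io/v1alpha1 objects.
func newScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	if err := configv1alpha1.Install(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}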
+package v1alpha1 diff --git a/vendor/github.com/openshift/api/config/v1alpha1/register.go b/vendor/github.com/openshift/api/config/v1alpha1/register.go new file mode 100644 index 000000000..7ec30d7aa --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/register.go @@ -0,0 +1,40 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "config.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &InsightsDataGather{}, + &InsightsDataGatherList{}, + &Backup{}, + &BackupList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/techpreview.backup.testsuite.yaml b/vendor/github.com/openshift/api/config/v1alpha1/techpreview.backup.testsuite.yaml new file mode 100644 index 000000000..91836dd93 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/techpreview.backup.testsuite.yaml @@ -0,0 +1,202 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[TechPreview] Backup" +crd: 0000_10_config-operator_01_backup-TechPreviewNoUpgrade.crd.yaml +tests: + onCreate: + - name: Should be able to create a Backup with a valid spec + initial: | + apiVersion: config.openshift.io/v1alpha1 + kind: Backup + spec: + etcd: + schedule: "* 2 * * *" + pvcName: etcdbackup-pvc + expected: | + apiVersion: config.openshift.io/v1alpha1 + kind: Backup + spec: + etcd: + schedule: "* 2 * * *" + pvcName: etcdbackup-pvc + - name: Should be able to create an EtcdBackup without the pvcName specified + initial: | + apiVersion: config.openshift.io/v1alpha1 + kind: Backup + spec: + etcd: + schedule: "* 2 * * *" + expected: | + apiVersion: config.openshift.io/v1alpha1 + kind: Backup + spec: + etcd: + schedule: "* 2 * * *" + - name: Should be able to create a Backup with a valid schedule - At 22:00 on every day-of-week from Monday through Friday + initial: | + apiVersion: config.openshift.io/v1alpha1 + kind: Backup + spec: + etcd: + schedule: "0 22 * * 1-5" + pvcName: etcdbackup-pvc + expected: | + apiVersion: config.openshift.io/v1alpha1 + kind: Backup + spec: + etcd: + schedule: "0 22 * * 1-5" + pvcName: etcdbackup-pvc + - name: Should be able to create a Backup with a valid schedule - At 04:05 on Sunday. 
+ initial: | + apiVersion: config.openshift.io/v1alpha1 + kind: Backup + spec: + etcd: + schedule: "5 4 * * SUN" + pvcName: etcdbackup-pvc + expected: | + apiVersion: config.openshift.io/v1alpha1 + kind: Backup + spec: + etcd: + schedule: "5 4 * * SUN" + pvcName: etcdbackup-pvc + - name: Should be able to create a Backup with a valid schedule - Predefined hourly + initial: | + apiVersion: config.openshift.io/v1alpha1 + kind: Backup + spec: + etcd: + schedule: "@hourly" + pvcName: etcdbackup-pvc + expected: | + apiVersion: config.openshift.io/v1alpha1 + kind: Backup + spec: + etcd: + schedule: "@hourly" + pvcName: etcdbackup-pvc + - name: Should fail to create an EtcdBackup with an invalid schedule - At 04:05 on invalid day FOO. + initial: | + apiVersion: config.openshift.io/v1alpha1 + kind: Backup + spec: + etcd: + schedule: "5 4 * * FOO" + pvcName: etcdbackup-pvc + expectedError: "spec.etcd.schedule in body should match" + - name: Should fail to create an EtcdBackup with an invalid schedule - Predefined typo @hourli instead of @hourly. + initial: | + apiVersion: config.openshift.io/v1alpha1 + kind: Backup + spec: + etcd: + schedule: "@hourli" + pvcName: etcdbackup-pvc + expectedError: "spec.etcd.schedule in body should match" + - name: Should fail to create an EtcdBackup with an invalid schedule - Non standard L last Friday in month + initial: | + apiVersion: config.openshift.io/v1alpha1 + kind: Backup + spec: + etcd: + schedule: "* * * * 5L" + pvcName: etcdbackup-pvc + expectedError: "spec.etcd.schedule in body should match" + - name: Should fail to create an EtcdBackup with an invalid schedule - Non standard L 5th day before last day of month + initial: | + apiVersion: config.openshift.io/v1alpha1 + kind: Backup + spec: + etcd: + schedule: "* * L-5 * *" + pvcName: etcdbackup-pvc + expectedError: "spec.etcd.schedule in body should match" + - name: Should fail to create an EtcdBackup with an invalid schedule - Non standard W closest weekday to 15th of month + initial: | + apiVersion: config.openshift.io/v1alpha1 + kind: Backup + spec: + etcd: + schedule: "* * 15W * *" + pvcName: etcdbackup-pvc + expectedError: "spec.etcd.schedule in body should match" + - name: Should be able to create a Backup with a valid time zone - Africa/Banjul + initial: | + apiVersion: config.openshift.io/v1alpha1 + kind: Backup + spec: + etcd: + timeZone: Africa/Banjul + pvcName: etcdbackup-pvc + expected: | + apiVersion: config.openshift.io/v1alpha1 + kind: Backup + spec: + etcd: + timeZone: Africa/Banjul + pvcName: etcdbackup-pvc + - name: Should be able to create a Backup with a valid time zone - Etc/GMT-8 + initial: | + apiVersion: config.openshift.io/v1alpha1 + kind: Backup + spec: + etcd: + timeZone: Etc/GMT-8 + pvcName: etcdbackup-pvc + expected: | + apiVersion: config.openshift.io/v1alpha1 + kind: Backup + spec: + etcd: + timeZone: Etc/GMT-8 + pvcName: etcdbackup-pvc + - name: Should be able to create a Backup with a valid time zone - Etc/UTC + initial: | + apiVersion: config.openshift.io/v1alpha1 + kind: Backup + spec: + etcd: + timeZone: Etc/UTC + pvcName: etcdbackup-pvc + expected: | + apiVersion: config.openshift.io/v1alpha1 + kind: Backup + spec: + etcd: + timeZone: Etc/UTC + pvcName: etcdbackup-pvc + - name: Should be able to create a Backup with a valid time zone - America/Argentina/Catamarca + initial: | + apiVersion: config.openshift.io/v1alpha1 + kind: Backup + spec: + etcd: + timeZone: America/Argentina/Catamarca + pvcName: etcdbackup-pvc + expected: | + apiVersion: config.openshift.io/v1alpha1 
+ kind: Backup + spec: + etcd: + timeZone: America/Argentina/Catamarca + pvcName: etcdbackup-pvc + - name: Should fail to create an EtcdBackup with an invalid time zone - GMT2 + initial: | + apiVersion: config.openshift.io/v1alpha1 + kind: Backup + spec: + etcd: + timeZone: GMT2 + pvcName: etcdbackup-pvc + expectedError: "spec.etcd.timeZone in body should match" + - name: Should fail to create an EtcdBackup with an invalid time zone - GMT+3 + initial: | + apiVersion: config.openshift.io/v1alpha1 + kind: Backup + spec: + etcd: + timeZone: GMT+3 + pvcName: etcdbackup-pvc + expectedError: "spec.etcd.timeZone in body should match" + diff --git a/vendor/github.com/openshift/api/config/v1alpha1/techpreview.insightsdatagather.testsuite.yaml b/vendor/github.com/openshift/api/config/v1alpha1/techpreview.insightsdatagather.testsuite.yaml new file mode 100644 index 000000000..f73792738 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/techpreview.insightsdatagather.testsuite.yaml @@ -0,0 +1,14 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] InsightsDataGather" +crd: 0000_10_config-operator_01_insightsdatagather.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal InsightsDataGather + initial: | + apiVersion: config.openshift.io/v1alpha1 + kind: InsightsDataGather + spec: {} # No spec is required for an InsightsDataGather + expected: | + apiVersion: config.openshift.io/v1alpha1 + kind: InsightsDataGather + spec: {} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go b/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go new file mode 100644 index 000000000..9af55b540 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_backup.go @@ -0,0 +1,168 @@ +package v1alpha1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +// Backup provides configuration for performing backups of the openshift cluster. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +type Backup struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec BackupSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +kubebuilder:validation:Optional + // +optional + Status BackupStatus `json:"status"` +} + +type BackupSpec struct { + // etcd specifies the configuration for periodic backups of the etcd cluster + // +kubebuilder:validation:Required + EtcdBackupSpec EtcdBackupSpec `json:"etcd"` +} + +type BackupStatus struct { +} + +// EtcdBackupSpec provides configuration for automated etcd backups to the cluster-etcd-operator +type EtcdBackupSpec struct { + + // Schedule defines the recurring backup schedule in Cron format + // every 2 hours: 0 */2 * * * + // every day at 3am: 0 3 * * * + // Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice.
+ // The current default is "no backups", but will change in the future. + // +kubebuilder:validation:Optional + // +optional + // +kubebuilder:validation:Pattern:=`^(@(annually|yearly|monthly|weekly|daily|hourly))|(\*|(?:\*|(?:[0-9]|(?:[1-5][0-9])))\/(?:[0-9]|(?:[1-5][0-9]))|(?:[0-9]|(?:[1-5][0-9]))(?:(?:\-[0-9]|\-(?:[1-5][0-9]))?|(?:\,(?:[0-9]|(?:[1-5][0-9])))*)) (\*|(?:\*|(?:\*|(?:[0-9]|1[0-9]|2[0-3])))\/(?:[0-9]|1[0-9]|2[0-3])|(?:[0-9]|1[0-9]|2[0-3])(?:(?:\-(?:[0-9]|1[0-9]|2[0-3]))?|(?:\,(?:[0-9]|1[0-9]|2[0-3]))*)) (\*|(?:[1-9]|(?:[12][0-9])|3[01])(?:(?:\-(?:[1-9]|(?:[12][0-9])|3[01]))?|(?:\,(?:[1-9]|(?:[12][0-9])|3[01]))*)) (\*|(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC)(?:(?:\-(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC))?|(?:\,(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC))*)) (\*|(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT)(?:(?:\-(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT))?|(?:\,(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT))*))$` + Schedule string `json:"schedule"` + + // Cron Regex breakdown: + // Allow macros: (@(annually|yearly|monthly|weekly|daily|hourly)) + // OR + // Minute: + // (\*|(?:\*|(?:[0-9]|(?:[1-5][0-9])))\/(?:[0-9]|(?:[1-5][0-9]))|(?:[0-9]|(?:[1-5][0-9]))(?:(?:\-[0-9]|\-(?:[1-5][0-9]))?|(?:\,(?:[0-9]|(?:[1-5][0-9])))*)) + // Hour: + // (\*|(?:\*|(?:\*|(?:[0-9]|1[0-9]|2[0-3])))\/(?:[0-9]|1[0-9]|2[0-3])|(?:[0-9]|1[0-9]|2[0-3])(?:(?:\-(?:[0-9]|1[0-9]|2[0-3]))?|(?:\,(?:[0-9]|1[0-9]|2[0-3]))*)) + // Day of the Month: + // (\*|(?:[1-9]|(?:[12][0-9])|3[01])(?:(?:\-(?:[1-9]|(?:[12][0-9])|3[01]))?|(?:\,(?:[1-9]|(?:[12][0-9])|3[01]))*)) + // Month: + // (\*|(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC)(?:(?:\-(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC))?|(?:\,(?:[1-9]|1[012]|JAN|FEB|MAR|APR|MAY|JUN|JUL|AUG|SEP|OCT|NOV|DEC))*)) + // Day of Week: + // (\*|(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT)(?:(?:\-(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT))?|(?:\,(?:[0-6]|SUN|MON|TUE|WED|THU|FRI|SAT))*)) + // + + // The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. + // If not specified, this will default to the time zone of the kube-controller-manager process. + // See https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones + // +kubebuilder:validation:Optional + // +optional + // +kubebuilder:validation:Pattern:=`^([A-Za-z_]+([+-]*0)*|[A-Za-z_]+(\/[A-Za-z_]+){1,2})(\/GMT[+-]\d{1,2})?$` + TimeZone string `json:"timeZone"` + + // Timezone regex breakdown: + // ([A-Za-z_]+([+-]*0)*|[A-Za-z_]+(/[A-Za-z_]+){1,2}) - Matches either: + // [A-Za-z_]+([+-]*0)* - One or more alphabetical characters (uppercase or lowercase) or underscores, followed by a +0 or -0 to account for GMT+0 or GMT-0 (for the first part of the timezone identifier). + // [A-Za-z_]+(/[A-Za-z_]+){1,2} - One or more alphabetical characters (uppercase or lowercase) or underscores, followed by one or two occurrences of a forward slash followed by one or more alphabetical characters or underscores. This allows for matching timezone identifiers with 2 or 3 parts, e.g America/Argentina/Buenos_Aires + // (/GMT[+-]\d{1,2})? - Makes the GMT offset suffix optional. It matches "/GMT" followed by either a plus ("+") or minus ("-") sign and one or two digits (the GMT offset) + + // RetentionPolicy defines the retention policy for retaining and deleting existing backups. 
+ // +kubebuilder:validation:Optional + // +optional + RetentionPolicy RetentionPolicy `json:"retentionPolicy"` + + // PVCName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the + // etcd backup files would be saved + // The PVC itself must always be created in the "openshift-etcd" namespace + // If the PVC is left unspecified "" then the platform will choose a reasonable default location to save the backup. + // In the future this would be backups saved across the control-plane master nodes. + // +kubebuilder:validation:Optional + // +optional + PVCName string `json:"pvcName"` +} + +// RetentionType is the enumeration of valid retention policy types +// +enum +// +kubebuilder:validation:Enum:="RetentionNumber";"RetentionSize" +type RetentionType string + +const ( + // RetentionTypeNumber sets the retention policy based on the number of backup files saved + RetentionTypeNumber RetentionType = "RetentionNumber" + // RetentionTypeSize sets the retention policy based on the total size of the backup files saved + RetentionTypeSize RetentionType = "RetentionSize" +) + +// RetentionPolicy defines the retention policy for retaining and deleting existing backups. +// This struct is a discriminated union that allows users to select the type of retention policy from the supported types. +// +union +type RetentionPolicy struct { + // RetentionType sets the type of retention policy. + // Currently, the only valid policies are retention by number of backups (RetentionNumber), by the size of backups (RetentionSize). More policies or types may be added in the future. + // Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. + // The current default is RetentionNumber with 15 backups kept. + // +unionDiscriminator + // +required + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum:="";"RetentionNumber";"RetentionSize" + RetentionType RetentionType `json:"retentionType"` + + // RetentionNumber configures the retention policy based on the number of backups + // +kubebuilder:validation:Optional + // +optional + RetentionNumber *RetentionNumberConfig `json:"retentionNumber,omitempty"` + + // RetentionSize configures the retention policy based on the size of backups + // +kubebuilder:validation:Optional + // +optional + RetentionSize *RetentionSizeConfig `json:"retentionSize,omitempty"` +} + +// RetentionNumberConfig specifies the configuration of the retention policy on the number of backups +type RetentionNumberConfig struct { + // MaxNumberOfBackups defines the maximum number of backups to retain. + // If the existing number of backups saved is equal to MaxNumberOfBackups then + // the oldest backup will be removed before a new backup is initiated. + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Required + // +required + MaxNumberOfBackups int `json:"maxNumberOfBackups,omitempty"` +} + +// RetentionSizeConfig specifies the configuration of the retention policy on the total size of backups +type RetentionSizeConfig struct { + // MaxSizeOfBackupsGb defines the total size in GB of backups to retain. + // If the current total size of backups exceeds MaxSizeOfBackupsGb then + // the oldest backup will be removed before a new backup is initiated.
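Tying the spec types above together, a hedged example (not shipped in this patch) of a Backup that runs every two hours in UTC and keeps at most 15 backups, matching the documented default retention:

package example

import (
	configv1alpha1 "github.com/openshift/api/config/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleBackup fills every EtcdBackupSpec field defined above. The PVC
// name is illustrative; per the field docs it must live in openshift-etcd.
func exampleBackup() configv1alpha1.Backup {
	return configv1alpha1.Backup{
		ObjectMeta: metav1.ObjectMeta{Name: "default"},
		Spec: configv1alpha1.BackupSpec{
			EtcdBackupSpec: configv1alpha1.EtcdBackupSpec{
				Schedule: "0 */2 * * *", // every 2 hours
				TimeZone: "Etc/UTC",
				PVCName:  "etcdbackup-pvc",
				RetentionPolicy: configv1alpha1.RetentionPolicy{
					RetentionType:   configv1alpha1.RetentionTypeNumber,
					RetentionNumber: &configv1alpha1.RetentionNumberConfig{MaxNumberOfBackups: 15},
				},
			},
		},
	}
}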
+ // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Required + // +required + MaxSizeOfBackupsGb int `json:"maxSizeOfBackupsGb,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BackupList is a collection of items +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +type BackupList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + Items []Backup `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go b/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go new file mode 100644 index 000000000..4dcdb2ec4 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/types_insights.go @@ -0,0 +1,82 @@ +package v1alpha1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +// InsightsDataGather provides data gather configuration options for the Insights Operator. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +type InsightsDataGather struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + Spec InsightsDataGatherSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status InsightsDataGatherStatus `json:"status"` +} + +type InsightsDataGatherSpec struct { + // gatherConfig spec attribute includes all the configuration options related to + // gathering of the Insights data and its uploading to the ingress. + // +optional + GatherConfig GatherConfig `json:"gatherConfig,omitempty"` +} + +type InsightsDataGatherStatus struct { +} + +// gatherConfig provides data gathering configuration options. +type GatherConfig struct { + // dataPolicy allows user to enable additional global obfuscation of the IP addresses and base domain + // in the Insights archive data. Valid values are "None" and "ObfuscateNetworking". + // When set to None the data is not obfuscated. + // When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. + // The current default is None. + // +optional + DataPolicy DataPolicy `json:"dataPolicy,omitempty"` + // disabledGatherers is a list of gatherers to be excluded from the gathering. All the gatherers can be disabled by providing "all" value. + // If all the gatherers are disabled, the Insights operator does not gather any data. + // The particular gatherers IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md.
+ // Run the following command to get the names of last active gatherers: + // "oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'" + // An example of disabling gatherers looks like this: `disabledGatherers: ["clusterconfig/machine_configs", "workloads/workload_info"]` + // +optional + DisabledGatherers []string `json:"disabledGatherers"` +} + +const ( + // No data obfuscation + NoPolicy DataPolicy = "None" + // IP addresses and cluster domain name are obfuscated + ObfuscateNetworking DataPolicy = "ObfuscateNetworking" +) + +// dataPolicy declares valid data policy types +// +kubebuilder:validation:Enum="";None;ObfuscateNetworking +type DataPolicy string + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// InsightsDataGatherList is a collection of items +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +type InsightsDataGatherList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + Items []InsightsDataGather `json:"items"` +} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..8cd8536f3 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,294 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Backup) DeepCopyInto(out *Backup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Backup. +func (in *Backup) DeepCopy() *Backup { + if in == nil { + return nil + } + out := new(Backup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Backup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupList) DeepCopyInto(out *BackupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Backup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupList. +func (in *BackupList) DeepCopy() *BackupList { + if in == nil { + return nil + } + out := new(BackupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
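The insights types added above can be exercised the same way; a sketch (illustrative only, not part of the patch) that enables obfuscation and disables the two gatherers named in the disabledGatherers doc comment:

package example

import (
	configv1alpha1 "github.com/openshift/api/config/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleGather builds the cluster-scoped InsightsDataGather resource.
// The gatherer IDs are the ones quoted in the field documentation above.
func exampleGather() configv1alpha1.InsightsDataGather {
	return configv1alpha1.InsightsDataGather{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
		Spec: configv1alpha1.InsightsDataGatherSpec{
			GatherConfig: configv1alpha1.GatherConfig{
				DataPolicy:        configv1alpha1.ObfuscateNetworking,
				DisabledGatherers: []string{"clusterconfig/machine_configs", "workloads/workload_info"},
			},
		},
	}
}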
+func (in *BackupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupSpec) DeepCopyInto(out *BackupSpec) { + *out = *in + in.EtcdBackupSpec.DeepCopyInto(&out.EtcdBackupSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupSpec. +func (in *BackupSpec) DeepCopy() *BackupSpec { + if in == nil { + return nil + } + out := new(BackupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupStatus) DeepCopyInto(out *BackupStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupStatus. +func (in *BackupStatus) DeepCopy() *BackupStatus { + if in == nil { + return nil + } + out := new(BackupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EtcdBackupSpec) DeepCopyInto(out *EtcdBackupSpec) { + *out = *in + in.RetentionPolicy.DeepCopyInto(&out.RetentionPolicy) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdBackupSpec. +func (in *EtcdBackupSpec) DeepCopy() *EtcdBackupSpec { + if in == nil { + return nil + } + out := new(EtcdBackupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GatherConfig) DeepCopyInto(out *GatherConfig) { + *out = *in + if in.DisabledGatherers != nil { + in, out := &in.DisabledGatherers, &out.DisabledGatherers + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GatherConfig. +func (in *GatherConfig) DeepCopy() *GatherConfig { + if in == nil { + return nil + } + out := new(GatherConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsDataGather) DeepCopyInto(out *InsightsDataGather) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsDataGather. +func (in *InsightsDataGather) DeepCopy() *InsightsDataGather { + if in == nil { + return nil + } + out := new(InsightsDataGather) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InsightsDataGather) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InsightsDataGatherList) DeepCopyInto(out *InsightsDataGatherList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]InsightsDataGather, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsDataGatherList. +func (in *InsightsDataGatherList) DeepCopy() *InsightsDataGatherList { + if in == nil { + return nil + } + out := new(InsightsDataGatherList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *InsightsDataGatherList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsDataGatherSpec) DeepCopyInto(out *InsightsDataGatherSpec) { + *out = *in + in.GatherConfig.DeepCopyInto(&out.GatherConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsDataGatherSpec. +func (in *InsightsDataGatherSpec) DeepCopy() *InsightsDataGatherSpec { + if in == nil { + return nil + } + out := new(InsightsDataGatherSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InsightsDataGatherStatus) DeepCopyInto(out *InsightsDataGatherStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InsightsDataGatherStatus. +func (in *InsightsDataGatherStatus) DeepCopy() *InsightsDataGatherStatus { + if in == nil { + return nil + } + out := new(InsightsDataGatherStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionNumberConfig) DeepCopyInto(out *RetentionNumberConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionNumberConfig. +func (in *RetentionNumberConfig) DeepCopy() *RetentionNumberConfig { + if in == nil { + return nil + } + out := new(RetentionNumberConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RetentionPolicy) DeepCopyInto(out *RetentionPolicy) { + *out = *in + if in.RetentionNumber != nil { + in, out := &in.RetentionNumber, &out.RetentionNumber + *out = new(RetentionNumberConfig) + **out = **in + } + if in.RetentionSize != nil { + in, out := &in.RetentionSize, &out.RetentionSize + *out = new(RetentionSizeConfig) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionPolicy. +func (in *RetentionPolicy) DeepCopy() *RetentionPolicy { + if in == nil { + return nil + } + out := new(RetentionPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
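The generated DeepCopyInto for RetentionPolicy, shown above, allocates fresh RetentionNumberConfig and RetentionSizeConfig values rather than copying pointers; a small sketch of why that matters:

package example

import configv1alpha1 "github.com/openshift/api/config/v1alpha1"

// copyIsIndependent mutates a deep copy and shows the original is untouched,
// because DeepCopyInto allocates a new RetentionNumberConfig.
func copyIsIndependent() bool {
	orig := configv1alpha1.RetentionPolicy{
		RetentionType:   configv1alpha1.RetentionTypeNumber,
		RetentionNumber: &configv1alpha1.RetentionNumberConfig{MaxNumberOfBackups: 5},
	}
	cp := orig.DeepCopy()
	cp.RetentionNumber.MaxNumberOfBackups = 10
	return orig.RetentionNumber.MaxNumberOfBackups == 5 // true
}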
+func (in *RetentionSizeConfig) DeepCopyInto(out *RetentionSizeConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RetentionSizeConfig. +func (in *RetentionSizeConfig) DeepCopy() *RetentionSizeConfig { + if in == nil { + return nil + } + out := new(RetentionSizeConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..425c1ec51 --- /dev/null +++ b/vendor/github.com/openshift/api/config/v1alpha1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,121 @@ +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Backup = map[string]string{ + "": "\n\nBackup provides configuration for performing backups of the openshift cluster.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (Backup) SwaggerDoc() map[string]string { + return map_Backup +} + +var map_BackupList = map[string]string{ + "": "BackupList is a collection of items\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (BackupList) SwaggerDoc() map[string]string { + return map_BackupList +} + +var map_BackupSpec = map[string]string{ + "etcd": "etcd specifies the configuration for periodic backups of the etcd cluster", +} + +func (BackupSpec) SwaggerDoc() map[string]string { + return map_BackupSpec +} + +var map_EtcdBackupSpec = map[string]string{ + "": "EtcdBackupSpec provides configuration for automated etcd backups to the cluster-etcd-operator", + "schedule": "Schedule defines the recurring backup schedule in Cron format every 2 hours: 0 */2 * * * every day at 3am: 0 3 * * * Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. The current default is \"no backups\", but will change in the future.", + "timeZone": "The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. 
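The schedule, retention, and PVC fields documented in these maps combine into one small spec. As an illustrative, hedged sketch (the Go field and constant names — EtcdBackupSpec, RetentionTypeNumber, MaxNumberOfBackups — are assumed from the vendored config/v1alpha1 types, not spelled out in this patch), a consumer might build a Backup and lean on the generated DeepCopy functions above to avoid aliasing:

```go
package main

import (
	"fmt"

	configv1alpha1 "github.com/openshift/api/config/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Back up every two hours and keep at most 15 backups, mirroring the
	// documented schedule format and the RetentionNumber default above.
	backup := &configv1alpha1.Backup{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
		Spec: configv1alpha1.BackupSpec{
			EtcdBackupSpec: configv1alpha1.EtcdBackupSpec{
				Schedule: "0 */2 * * *",
				TimeZone: "UTC",
				RetentionPolicy: configv1alpha1.RetentionPolicy{
					RetentionType: configv1alpha1.RetentionTypeNumber,
					RetentionNumber: &configv1alpha1.RetentionNumberConfig{
						MaxNumberOfBackups: 15,
					},
				},
			},
		},
	}

	// The generated DeepCopy returns a fully independent object, so
	// mutating the copy cannot alias the original's pointers.
	cp := backup.DeepCopy()
	cp.Spec.EtcdBackupSpec.RetentionPolicy.RetentionNumber.MaxNumberOfBackups = 5
	fmt.Println(backup.Spec.EtcdBackupSpec.RetentionPolicy.RetentionNumber.MaxNumberOfBackups) // still 15
}
```

DeepCopy matters here because RetentionPolicy holds pointers; a shallow struct copy would share the RetentionNumber config between objects.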
See https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones", + "retentionPolicy": "RetentionPolicy defines the retention policy for retaining and deleting existing backups.", + "pvcName": "PVCName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the etcd backup files would be saved. The PVC itself must always be created in the \"openshift-etcd\" namespace. If the PVC is left unspecified \"\" then the platform will choose a reasonable default location to save the backup. In the future this would be backups saved across the control-plane master nodes.", +} + +func (EtcdBackupSpec) SwaggerDoc() map[string]string { + return map_EtcdBackupSpec +} + +var map_RetentionNumberConfig = map[string]string{ + "": "RetentionNumberConfig specifies the configuration of the retention policy on the number of backups", + "maxNumberOfBackups": "MaxNumberOfBackups defines the maximum number of backups to retain. If the existing number of backups saved is equal to MaxNumberOfBackups then the oldest backup will be removed before a new backup is initiated.", +} + +func (RetentionNumberConfig) SwaggerDoc() map[string]string { + return map_RetentionNumberConfig +} + +var map_RetentionPolicy = map[string]string{ + "": "RetentionPolicy defines the retention policy for retaining and deleting existing backups. This struct is a discriminated union that allows users to select the type of retention policy from the supported types.", + "retentionType": "RetentionType sets the type of retention policy. Currently, the only valid policies are retention by number of backups (RetentionNumber), by the size of backups (RetentionSize). More policies or types may be added in the future. Empty string means no opinion and the platform is left to choose a reasonable default which is subject to change without notice. The current default is RetentionNumber with 15 backups kept.", + "retentionNumber": "RetentionNumber configures the retention policy based on the number of backups", + "retentionSize": "RetentionSize configures the retention policy based on the size of backups", +} + +func (RetentionPolicy) SwaggerDoc() map[string]string { + return map_RetentionPolicy +} + +var map_RetentionSizeConfig = map[string]string{ + "": "RetentionSizeConfig specifies the configuration of the retention policy on the total size of backups", + "maxSizeOfBackupsGb": "MaxSizeOfBackupsGb defines the total size in GB of backups to retain. If the current total size of backups exceeds MaxSizeOfBackupsGb then the oldest backup will be removed before a new backup is initiated.", +} + +func (RetentionSizeConfig) SwaggerDoc() map[string]string { + return map_RetentionSizeConfig +} + +var map_GatherConfig = map[string]string{ + "": "gatherConfig provides data gathering configuration options.", + "dataPolicy": "dataPolicy allows the user to enable additional global obfuscation of the IP addresses and base domain in the Insights archive data. Valid values are \"None\" and \"ObfuscateNetworking\". When set to None the data is not obfuscated. When set to ObfuscateNetworking the IP addresses and the cluster domain name are obfuscated. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is None.", + "disabledGatherers": "disabledGatherers is a list of gatherers to be excluded from the gathering. All the gatherers can be disabled by providing \"all\" value.
If all the gatherers are disabled, the Insights operator does not gather any data. The particular gatherer IDs can be found at https://github.com/openshift/insights-operator/blob/master/docs/gathered-data.md. Run the following command to get the names of the last active gatherers: \"oc get insightsoperators.operator.openshift.io cluster -o json | jq '.status.gatherStatus.gatherers[].name'\" An example of disabling gatherers looks like this: `disabledGatherers: [\"clusterconfig/machine_configs\", \"workloads/workload_info\"]`", +} + +func (GatherConfig) SwaggerDoc() map[string]string { + return map_GatherConfig +} + +var map_InsightsDataGather = map[string]string{ + "": "\n\nInsightsDataGather provides data gather configuration options for the Insights Operator.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (InsightsDataGather) SwaggerDoc() map[string]string { + return map_InsightsDataGather +} + +var map_InsightsDataGatherList = map[string]string{ + "": "InsightsDataGatherList is a collection of items\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (InsightsDataGatherList) SwaggerDoc() map[string]string { + return map_InsightsDataGatherList +} + +var map_InsightsDataGatherSpec = map[string]string{ + "gatherConfig": "gatherConfig spec attribute includes all the configuration options related to gathering of the Insights data and its uploading to the ingress.", +} + +func (InsightsDataGatherSpec) SwaggerDoc() map[string]string { + return map_InsightsDataGatherSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/console/.codegen.yaml b/vendor/github.com/openshift/api/console/.codegen.yaml new file mode 100644 index 000000000..ffa2c8d9b --- /dev/null +++ b/vendor/github.com/openshift/api/console/.codegen.yaml @@ -0,0 +1,2 @@ +swaggerdocs: + commentPolicy: Warn diff --git a/vendor/github.com/openshift/api/console/OWNERS b/vendor/github.com/openshift/api/console/OWNERS new file mode 100644 index 000000000..d39278070 --- /dev/null +++ b/vendor/github.com/openshift/api/console/OWNERS @@ -0,0 +1,3 @@ +reviewers: + - jhadvig + - spadgett diff --git a/vendor/github.com/openshift/api/console/install.go b/vendor/github.com/openshift/api/console/install.go new file mode 100644 index 000000000..bf87abbf5 --- /dev/null +++ b/vendor/github.com/openshift/api/console/install.go @@ -0,0 +1,27 @@ +package console + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + consolev1 "github.com/openshift/api/console/v1" + consolev1alpha1 "github.com/openshift/api/console/v1alpha1" +) + +const ( + GroupName = "console.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(consolev1alpha1.Install, consolev1.Install) + // Install is a function which adds
every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/console/v1/00_consoleclidownload.crd.yaml b/vendor/github.com/openshift/api/console/v1/00_consoleclidownload.crd.yaml new file mode 100644 index 000000000..4595ef470 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/00_consoleclidownload.crd.yaml @@ -0,0 +1,77 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/481 + capability.openshift.io/name: Console + description: Extension for configuring openshift web console command line interface (CLI) downloads. + displayName: ConsoleCLIDownload + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: consoleclidownloads.console.openshift.io +spec: + group: console.openshift.io + names: + kind: ConsoleCLIDownload + listKind: ConsoleCLIDownloadList + plural: consoleclidownloads + singular: consoleclidownload + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.displayName + name: Display name + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: string + name: v1 + schema: + openAPIV3Schema: + description: "ConsoleCLIDownload is an extension for configuring openshift web console command line interface (CLI) downloads. \n Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ConsoleCLIDownloadSpec is the desired cli download configuration. + type: object + required: + - description + - displayName + - links + properties: + description: + description: description is the description of the CLI download (can include markdown). + type: string + displayName: + description: displayName is the display name of the CLI download. + type: string + links: + description: links is a list of objects that provide CLI download link details. 
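The required ConsoleCLIDownload spec fields just listed (description, displayName, links) are small enough to show end to end. A hedged sketch follows; the Go type names (ConsoleCLIDownloadSpec, CLIDownloadLink) are assumed from the vendored console/v1 package, and the URL is a placeholder:

```go
package main

import (
	consolev1 "github.com/openshift/api/console/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// cliDownload fills the three required spec fields from the
// ConsoleCLIDownload schema above; each link href must be https.
var cliDownload = consolev1.ConsoleCLIDownload{
	ObjectMeta: metav1.ObjectMeta{Name: "example-cli"},
	Spec: consolev1.ConsoleCLIDownloadSpec{
		DisplayName: "example CLI",
		Description: "Download the example command line tool.",
		Links: []consolev1.CLIDownloadLink{
			{Text: "Download for Linux", Href: "https://downloads.example.com/example-cli-linux.tar.gz"},
		},
	},
}
```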
+ type: array + items: + type: object + required: + - href + properties: + href: + description: href is the absolute secure URL for the link (must use https) + type: string + pattern: ^https:// + text: + description: text is the display text for the link + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/console/v1/00_consoleexternalloglink.crd.yaml b/vendor/github.com/openshift/api/console/v1/00_consoleexternalloglink.crd.yaml new file mode 100644 index 000000000..21613efcb --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/00_consoleexternalloglink.crd.yaml @@ -0,0 +1,68 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/481 + capability.openshift.io/name: Console + description: ConsoleExternalLogLink is an extension for customizing OpenShift web console log links. + displayName: ConsoleExternalLogLinks + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: consoleexternalloglinks.console.openshift.io +spec: + group: console.openshift.io + names: + kind: ConsoleExternalLogLink + listKind: ConsoleExternalLogLinkList + plural: consoleexternalloglinks + singular: consoleexternalloglink + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.text + name: Text + type: string + - jsonPath: .spec.hrefTemplate + name: HrefTemplate + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: "ConsoleExternalLogLink is an extension for customizing OpenShift web console log links. \n Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ConsoleExternalLogLinkSpec is the desired log link configuration. The log link will appear on the logs tab of the pod details page. + type: object + required: + - hrefTemplate + - text + properties: + hrefTemplate: + description: "hrefTemplate is an absolute secure URL (must use https) for the log link including variables to be replaced. Variables are specified in the URL with the format ${variableName}, for instance, ${containerName} and will be replaced with the corresponding values from the resource. Resource is a pod. Supported variables are: - ${resourceName} - name of the resource which contains the logs - ${resourceUID} - UID of the resource which contains the logs - e.g.
`11111111-2222-3333-4444-555555555555` - ${containerName} - name of the resource's container that contains the logs - ${resourceNamespace} - namespace of the resource that contains the logs - ${resourceNamespaceUID} - namespace UID of the resource that contains the logs - ${podLabels} - JSON representation of labels matching the pod with the logs - e.g. `{\"key1\":\"value1\",\"key2\":\"value2\"}` \n e.g., https://example.com/logs?resourceName=${resourceName}&containerName=${containerName}&resourceNamespace=${resourceNamespace}&podLabels=${podLabels}" + type: string + pattern: ^https:// + namespaceFilter: + description: namespaceFilter is a regular expression used to restrict a log link to a matching set of namespaces (e.g., `^openshift-`). The string is converted into a regular expression using the JavaScript RegExp constructor. If not specified, links will be displayed for all the namespaces. + type: string + text: + description: text is the display text for the link + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/console/v1/00_consolelink.crd.yaml b/vendor/github.com/openshift/api/console/v1/00_consolelink.crd.yaml new file mode 100644 index 000000000..766ef66d9 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/00_consolelink.crd.yaml @@ -0,0 +1,125 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/481 + capability.openshift.io/name: Console + description: Extension for customizing OpenShift web console links + displayName: ConsoleLinks + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: consolelinks.console.openshift.io +spec: + group: console.openshift.io + names: + kind: ConsoleLink + listKind: ConsoleLinkList + plural: consolelinks + singular: consolelink + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.text + name: Text + type: string + - jsonPath: .spec.href + name: URL + type: string + - jsonPath: .spec.menu + name: Menu + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: "ConsoleLink is an extension for customizing OpenShift web console links. \n Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ConsoleLinkSpec is the desired console link configuration. 
+ type: object + required: + - href + - location + - text + properties: + applicationMenu: + description: applicationMenu holds information about section and icon used for the link in the application menu, and it is applicable only when location is set to ApplicationMenu. + type: object + required: + - section + properties: + imageURL: + description: imageUrl is the URL for the icon used in front of the link in the application menu. The URL must be an HTTPS URL or a Data URI. The image should be square and will be shown at 24x24 pixels. + type: string + section: + description: section is the section of the application menu in which the link should appear. This can be any text that will appear as a subheading in the application menu dropdown. A new section will be created if the text does not match text of an existing section. + type: string + href: + description: href is the absolute secure URL for the link (must use https) + type: string + pattern: ^https:// + location: + description: location determines which location in the console the link will be appended to (ApplicationMenu, HelpMenu, UserMenu, NamespaceDashboard). + type: string + pattern: ^(ApplicationMenu|HelpMenu|UserMenu|NamespaceDashboard)$ + namespaceDashboard: + description: namespaceDashboard holds information about namespaces in which the dashboard link should appear, and it is applicable only when location is set to NamespaceDashboard. If not specified, the link will appear in all namespaces. + type: object + properties: + namespaceSelector: + description: namespaceSelector is used to select the Namespaces that should contain dashboard link by label. If the namespace labels match, dashboard link will be shown for the namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces is an array of namespace names in which the dashboard link should appear. 
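A hedged sketch of a ConsoleLink exercising the namespaceDashboard stanza described above (the Go identifiers — the inlined Link struct, NamespaceDashboardSpec, the NamespaceDashboard location constant — are assumed from the vendored console/v1 types):

```go
package main

import (
	consolev1 "github.com/openshift/api/console/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newDocsLink returns a ConsoleLink that shows up on the dashboards of the
// listed namespaces, using the href/location/namespaceDashboard fields above.
func newDocsLink() *consolev1.ConsoleLink {
	return &consolev1.ConsoleLink{
		ObjectMeta: metav1.ObjectMeta{Name: "team-docs"},
		Spec: consolev1.ConsoleLinkSpec{
			// Link supplies the required text/href pair; href must be
			// https per the CRD pattern.
			Link: consolev1.Link{
				Text: "Team documentation",
				Href: "https://docs.example.com",
			},
			Location: consolev1.NamespaceDashboard,
			NamespaceDashboard: &consolev1.NamespaceDashboardSpec{
				Namespaces: []string{"dev", "staging"},
			},
		},
	}
}
```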
+ type: array + items: + type: string + text: + description: text is the display text for the link + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/console/v1/00_consolenotification.crd.yaml b/vendor/github.com/openshift/api/console/v1/00_consolenotification.crd.yaml new file mode 100644 index 000000000..9206ebcbc --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/00_consolenotification.crd.yaml @@ -0,0 +1,84 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/481 + capability.openshift.io/name: Console + description: Extension for configuring openshift web console notifications. + displayName: ConsoleNotification + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: consolenotifications.console.openshift.io +spec: + group: console.openshift.io + names: + kind: ConsoleNotification + listKind: ConsoleNotificationList + plural: consolenotifications + singular: consolenotification + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.text + name: Text + type: string + - jsonPath: .spec.location + name: Location + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: "ConsoleNotification is the extension for configuring openshift web console notifications. \n Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ConsoleNotificationSpec is the desired console notification configuration. + type: object + required: + - text + properties: + backgroundColor: + description: backgroundColor is the color of the background for the notification as CSS data type color. + type: string + color: + description: color is the color of the text for the notification as CSS data type color. + type: string + link: + description: link is an object that holds notification link details. + type: object + required: + - href + - text + properties: + href: + description: href is the absolute secure URL for the link (must use https) + type: string + pattern: ^https:// + text: + description: text is the display text for the link + type: string + location: + description: 'location is the location of the notification in the console. Valid values are: "BannerTop", "BannerBottom", "BannerTopBottom".' + type: string + pattern: ^(BannerTop|BannerBottom|BannerTopBottom)$ + text: + description: text is the visible text of the notification. 
+ type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/console/v1/00_consolequickstart.crd.yaml b/vendor/github.com/openshift/api/console/v1/00_consolequickstart.crd.yaml new file mode 100644 index 000000000..c2670d9a8 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/00_consolequickstart.crd.yaml @@ -0,0 +1,165 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/750 + capability.openshift.io/name: Console + description: Extension for guiding the user through various workflows in the OpenShift web console. + displayName: ConsoleQuickStart + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: consolequickstarts.console.openshift.io +spec: + group: console.openshift.io + names: + kind: ConsoleQuickStart + listKind: ConsoleQuickStartList + plural: consolequickstarts + singular: consolequickstart + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "ConsoleQuickStart is an extension for guiding the user through various workflows in the OpenShift web console. \n Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ConsoleQuickStartSpec is the desired quick start configuration. + type: object + required: + - description + - displayName + - durationMinutes + - introduction + - tasks + properties: + accessReviewResources: + description: accessReviewResources contains a list of resources that the user's access will be reviewed against in order for the user to complete the Quick Start. The Quick Start will be hidden if any of the access reviews fail. + type: array + items: + description: ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface + type: object + properties: + group: + description: Group is the API Group of the Resource. "*" means all. + type: string + name: + description: Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. + type: string + namespace: + description: Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces "" (empty) is defaulted for LocalSubjectAccessReviews "" (empty) is empty for cluster-scoped resources "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview + type: string + resource: + description: Resource is one of the existing resource types. "*" means all. + type: string + subresource: + description: Subresource is one of the existing resource types. "" means none. + type: string + verb: + description: 'Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all.' + type: string + version: + description: Version is the API Version of the Resource. "*" means all. + type: string + conclusion: + description: conclusion sums up the Quick Start and suggests the possible next steps. (includes markdown) + type: string + description: + description: description is the description of the Quick Start. (includes markdown) + type: string + maxLength: 256 + minLength: 1 + displayName: + description: displayName is the display name of the Quick Start. + type: string + minLength: 1 + durationMinutes: + description: durationMinutes describes approximately how many minutes it will take to complete the Quick Start. + type: integer + minimum: 1 + icon: + description: icon is a base64 encoded image that will be displayed beside the Quick Start display name. The icon should be a vector image for easy scaling. The size of the icon should be 40x40. + type: string + introduction: + description: introduction describes the purpose of the Quick Start. (includes markdown) + type: string + minLength: 1 + nextQuickStart: + description: nextQuickStart is a list of the following Quick Starts, suggested for the user to try. + type: array + items: + type: string + prerequisites: + description: prerequisites contains all prerequisites that need to be met before taking a Quick Start. (includes markdown) + type: array + items: + type: string + tags: + description: tags is a list of strings that describe the Quick Start. + type: array + items: + type: string + tasks: + description: tasks is the list of steps the user has to perform to complete the Quick Start. + type: array + minItems: 1 + items: + description: ConsoleQuickStartTask is a single step in a Quick Start. + type: object + required: + - description + - title + properties: + description: + description: description describes the steps needed to complete the task. (includes markdown) + type: string + minLength: 1 + review: + description: review contains instructions to validate the task is complete. The user will select 'Yes' or 'No' using a radio button, which indicates whether the step was completed successfully. + type: object + required: + - failedTaskHelp + - instructions + properties: + failedTaskHelp: + description: failedTaskHelp contains suggestions for a failed task review and is shown at the end of the task. (includes markdown) + type: string + minLength: 1 + instructions: + description: instructions contains steps that the user needs to take in order to validate their work after going through a task. (includes markdown) + type: string + minLength: 1 + summary: + description: summary contains information about the passed step. + type: object + required: + - failed + - success + properties: + failed: + description: failed briefly describes the unsuccessfully passed task. (includes markdown) + type: string + maxLength: 128 + minLength: 1 + success: + description: success describes the successfully passed task. + type: string + minLength: 1 + title: + description: title describes the task and is displayed as a step heading.
+ type: string + minLength: 1 + served: true + storage: true diff --git a/vendor/github.com/openshift/api/console/v1/00_consolesample.crd.yaml b/vendor/github.com/openshift/api/console/v1/00_consolesample.crd.yaml new file mode 100644 index 000000000..a7234f556 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/00_consolesample.crd.yaml @@ -0,0 +1,167 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/481 + capability.openshift.io/name: Console + description: ConsoleSample is an extension for customizing OpenShift web console by adding samples. + displayName: ConsoleSample + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: consolesamples.console.openshift.io +spec: + group: console.openshift.io + names: + kind: ConsoleSample + listKind: ConsoleSampleList + plural: consolesamples + singular: consolesample + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "ConsoleSample is an extension for customizing OpenShift web console by adding samples. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec contains configuration for a console sample. + type: object + required: + - abstract + - description + - source + - title + properties: + abstract: + description: "abstract is a short introduction to the sample. \n It is required and must be no more than 100 characters in length. \n The abstract is shown on the sample card tile below the title and provider and is limited to three lines of content." + type: string + maxLength: 100 + description: + description: "description is a long form explanation of the sample. \n It is required and can have a maximum length of **4096** characters. \n It is a README.md-like content for additional information, links, pre-conditions, and other instructions. It will be rendered as Markdown so that it can contain line breaks, links, and other simple formatting." + type: string + maxLength: 4096 + icon: + description: "icon is an optional base64 encoded image shown beside the sample title. \n The format must follow the data: URL format and can have a maximum size of **10 KB**. \n data:[<mediatype>][;base64],<data> \n For example: \n data:image;base64, plus the base64 encoded image. \n Vector images can also be used. SVG icons must start with: \n data:image/svg+xml;base64, plus the base64 encoded SVG image. \n All sample catalog icons will be shown on a white background (also when the dark theme is used).
The web console ensures that different aspect ratios work correctly. Currently, the surface of the icon is at most 40x100px. \n For more information on the data URL format, please visit https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs." + type: string + maxLength: 14000 + pattern: ^data:([a-z/\.+0-9]*;(([-a-zA-Z0-9=])*;)?)?base64, + provider: + description: "provider is an optional label to honor who provides the sample. \n It is optional and must be no more than 50 characters in length. \n A provider can be a company like \"Red Hat\" or an organization like \"CNCF\" or \"Knative\". \n Currently, the provider is only shown on the sample card tile below the title with the prefix \"Provided by \"" + type: string + maxLength: 50 + source: + description: source defines where to deploy the sample service from. The sample may be sourced from an external git repository or container image. + type: object + required: + - type + properties: + containerImport: + description: containerImport allows the user to import a container image. + type: object + required: + - image + properties: + image: + description: "reference to a container image that provides an HTTP service. The service must be exposed on the default port (8080) unless otherwise configured with the port field. \n Supported formats: - <repository>/<image> - docker.io/<repository>/<image> - quay.io/<repository>/<image> - quay.io/<repository>/<image>@sha256:<digest> - quay.io/<repository>/<image>:<tag>" + type: string + maxLength: 256 + minLength: 1 + service: + description: service contains configuration for the Service resource created for this sample. + type: object + default: + targetPort: 8080 + properties: + targetPort: + description: targetPort is the port that the service listens on for HTTP requests. This port will be used for Service and Route created for this sample. Port must be in the range 1 to 65535. Default port is 8080. + type: integer + format: int32 + default: 8080 + maximum: 65535 + minimum: 1 + gitImport: + description: gitImport allows the user to import code from a git repository. + type: object + required: + - repository + properties: + repository: + description: repository contains the reference to the actual Git repository. + type: object + required: + - url + properties: + contextDir: + description: contextDir is used to specify a directory within the repository to build the component. Must start with `/` and have a maximum length of 256 characters. When omitted, the default value is to build from the root of the repository. + type: string + maxLength: 256 + pattern: ^/ + revision: + description: revision is the git revision at which to clone the git repository. Can be used to clone a specific branch, tag or commit SHA. Must be at most 256 characters in length. When omitted the repository's default branch is used. + type: string + maxLength: 256 + url: + description: "url of the Git repository that contains an HTTP service. The HTTP service must be exposed on the default port (8080) unless otherwise configured with the port field. \n Only public repositories on GitHub, GitLab and Bitbucket are currently supported: \n - https://github.com/<organization>/<repository> - https://gitlab.com/<organization>/<repository> - https://bitbucket.org/<organization>/<repository> \n The url must have a maximum length of 256 characters." + type: string + maxLength: 256 + minLength: 1 + pattern: ^https:\/\/(github.com|gitlab.com|bitbucket.org)\/[a-zA-Z0-9-]+\/[a-zA-Z0-9-]+(.git)?$ + service: + description: service contains configuration for the Service resource created for this sample.
+ type: object + default: + targetPort: 8080 + properties: + targetPort: + description: targetPort is the port that the service listens on for HTTP requests. This port will be used for Service created for this sample. Port must be in the range 1 to 65535. Default port is 8080. + type: integer + format: int32 + default: 8080 + maximum: 65535 + minimum: 1 + type: + description: 'type of the sample, currently supported: "GitImport";"ContainerImport"' + type: string + allOf: + - enum: + - GitImport + - ContainerImport + - enum: + - GitImport + - ContainerImport + x-kubernetes-validations: + - rule: 'self.type == ''GitImport'' ? has(self.gitImport) : !has(self.gitImport)' + message: source.gitImport is required when source.type is GitImport, and forbidden otherwise + - rule: 'self.type == ''ContainerImport'' ? has(self.containerImport) : !has(self.containerImport)' + message: source.containerImport is required when source.type is ContainerImport, and forbidden otherwise + tags: + description: "tags are optional string values that can be used to find samples in the samples catalog. \n Examples of common tags may be \"Java\", \"Quarkus\", etc. \n They will be displayed on the samples details page." + type: array + maxItems: 10 + items: + type: string + x-kubernetes-list-type: set + title: + description: "title is the display name of the sample. \n It is required and must be no more than 50 characters in length." + type: string + maxLength: 50 + minLength: 1 + type: + description: "type is an optional label to group multiple samples. \n It is optional and must be no more than 20 characters in length. \n Recommendation is a singular term like \"Builder Image\", \"Devfile\" or \"Serverless Function\". \n Currently, the type is shown as a badge on the sample card tile in the top right corner." + type: string + maxLength: 20 + served: true + storage: true diff --git a/vendor/github.com/openshift/api/console/v1/00_consoleyamlsample.crd.yaml b/vendor/github.com/openshift/api/console/v1/00_consoleyamlsample.crd.yaml new file mode 100644 index 000000000..c9bebdfb1 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/00_consoleyamlsample.crd.yaml @@ -0,0 +1,74 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/481 + capability.openshift.io/name: Console + description: Extension for configuring openshift web console YAML samples. + displayName: ConsoleYAMLSample + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: consoleyamlsamples.console.openshift.io +spec: + group: console.openshift.io + names: + kind: ConsoleYAMLSample + listKind: ConsoleYAMLSampleList + plural: consoleyamlsamples + singular: consoleyamlsample + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "ConsoleYAMLSample is an extension for customizing OpenShift web console YAML samples. \n Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer)." + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ConsoleYAMLSampleSpec is the desired YAML sample configuration. Samples will appear with their descriptions in a samples sidebar when creating resources in the web console. + type: object + required: + - description + - targetResource + - title + - yaml + properties: + description: + description: description of the YAML sample. + type: string + pattern: ^(.|\s)*\S(.|\s)*$ + snippet: + description: snippet indicates that the YAML sample is not the full YAML resource definition, but a fragment that can be inserted into the existing YAML document at the user's cursor. + type: boolean + targetResource: + description: targetResource contains apiVersion and kind of the resource the YAML sample is representing. + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + title: + description: title of the YAML sample. + type: string + pattern: ^(.|\s)*\S(.|\s)*$ + yaml: + description: yaml is the YAML sample to display. + type: string + pattern: ^(.|\s)*\S(.|\s)*$ + served: true + storage: true diff --git a/vendor/github.com/openshift/api/console/v1/90_consoleplugin.crd.yaml b/vendor/github.com/openshift/api/console/v1/90_consoleplugin.crd.yaml new file mode 100644 index 000000000..5734ebe0b --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/90_consoleplugin.crd.yaml @@ -0,0 +1,294 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1186 + capability.openshift.io/name: Console + description: Extension for configuring openshift web console plugins.
+ displayName: ConsolePlugin + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + service.beta.openshift.io/inject-cabundle: "true" + name: consoleplugins.console.openshift.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: webhook + namespace: openshift-console-operator + path: /crdconvert + port: 9443 + conversionReviewVersions: + - v1 + - v1alpha1 + group: console.openshift.io + names: + kind: ConsolePlugin + listKind: ConsolePluginList + plural: consoleplugins + singular: consoleplugin + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "ConsolePlugin is an extension for customizing OpenShift web console by dynamically loading code from another service running on the cluster. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ConsolePluginSpec is the desired plugin configuration. + type: object + required: + - backend + - displayName + properties: + backend: + description: backend holds the configuration of the backend which is serving the console's plugin. + type: object + required: + - type + properties: + service: + description: service is a Kubernetes Service that exposes the plugin using a deployment with an HTTP server. The Service must use HTTPS and Service serving certificate. The console backend will proxy the plugin's assets from the Service using the service CA bundle. + type: object + required: + - name + - namespace + - port + properties: + basePath: + description: basePath is the path to the plugin's assets. The primary asset is the manifest file called `plugin-manifest.json`, which is a JSON document that contains metadata about the plugin and the extensions. + type: string + default: / + maxLength: 256 + minLength: 1 + pattern: ^[a-zA-Z0-9.\-_~!$&'()*+,;=:@\/]*$ + name: + description: name of Service that is serving the plugin assets. + type: string + maxLength: 128 + minLength: 1 + namespace: + description: namespace of Service that is serving the plugin assets. + type: string + maxLength: 128 + minLength: 1 + port: + description: port on which the Service that is serving the plugin is listening on. + type: integer + format: int32 + maximum: 65535 + minimum: 1 + type: + description: "type is the backend type which serves the console's plugin. Currently only \"Service\" is supported. \n ---" + type: string + enum: + - Service + displayName: + description: displayName is the display name of the plugin. The displayName should be between 1 and 128 characters.
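To make the backend/service wiring concrete, here is a hedged sketch of a minimal v1 ConsolePlugin object (the Go identifiers — ConsolePluginBackend, ConsolePluginService, and the Service backend-type constant — are assumed from the vendored console/v1 types):

```go
package main

import (
	consolev1 "github.com/openshift/api/console/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// plugin wires the required backend/displayName pair from the v1 schema
// above: the console fetches plugin-manifest.json from the HTTPS Service.
var plugin = consolev1.ConsolePlugin{
	ObjectMeta: metav1.ObjectMeta{Name: "example-plugin"},
	Spec: consolev1.ConsolePluginSpec{
		DisplayName: "Example Plugin",
		Backend: consolev1.ConsolePluginBackend{
			Type: consolev1.Service, // the only backend type currently allowed
			Service: &consolev1.ConsolePluginService{
				Name:      "example-plugin",
				Namespace: "example-plugin-ns",
				Port:      9443,
				BasePath:  "/", // where plugin-manifest.json is served from
			},
		},
	},
}
```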
+ type: string + maxLength: 128 + minLength: 1 + i18n: + description: i18n is the configuration of the plugin's localization resources. + type: object + required: + - loadType + properties: + loadType: + description: loadType indicates how the plugin's localization resource should be loaded. Valid values are Preload, Lazy and the empty string. When set to Preload, all localization resources are fetched when the plugin is loaded. When set to Lazy, localization resources are lazily loaded as and when they are required by the console. When omitted or set to the empty string, the behaviour is equivalent to Lazy type. + type: string + enum: + - Preload + - Lazy + - "" + proxy: + description: proxy is a list of proxies that describe various service types to which the plugin needs to connect. + type: array + items: + description: ConsolePluginProxy holds information on various service types to which console's backend will proxy the plugin's requests. + type: object + required: + - alias + - endpoint + properties: + alias: + description: "alias is a proxy name that identifies the plugin's proxy. An alias name should be unique per plugin. The console backend exposes the following proxy endpoint: \n /api/proxy/plugin/<plugin-name>/<proxy-alias>/<request-path>?<optional-query> \n Request example path: \n /api/proxy/plugin/acm/search/pods?namespace=openshift-apiserver" + type: string + maxLength: 128 + minLength: 1 + pattern: ^[A-Za-z0-9-_]+$ + authorization: + description: authorization provides information about the authorization type, which the proxied request should contain + type: string + default: None + enum: + - UserToken + - None + caCertificate: + description: caCertificate provides the cert authority certificate contents, in case the proxied Service is using custom service CA. By default, the service CA bundle provided by the service-ca operator is used. + type: string + pattern: ^-----BEGIN CERTIFICATE-----([\s\S]*)-----END CERTIFICATE-----\s?$ + endpoint: + description: endpoint provides information about the endpoint to which the request is proxied. + type: object + required: + - type + properties: + service: + description: 'service is an in-cluster Service that the plugin will connect to. The Service must use HTTPS. The console backend exposes an endpoint in order to proxy communication between the plugin and the Service. Note: service field is required for now, since currently only "Service" type is supported.' + type: object + required: + - name + - namespace + - port + properties: + name: + description: name of Service that the plugin needs to connect to. + type: string + maxLength: 128 + minLength: 1 + namespace: + description: namespace of Service that the plugin needs to connect to + type: string + maxLength: 128 + minLength: 1 + port: + description: port on which the Service that the plugin needs to connect to is listening on. + type: integer + format: int32 + maximum: 65535 + minimum: 1 + type: + description: "type is the type of the console plugin's proxy. Currently only \"Service\" is supported. \n ---" + type: string + enum: + - Service + served: true + storage: false + - name: v1alpha1 + schema: + openAPIV3Schema: + description: "ConsolePlugin is an extension for customizing OpenShift web console by dynamically loading code from another service running on the cluster. \n Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support."
+ type: object + required: + - metadata + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ConsolePluginSpec is the desired plugin configuration. + type: object + required: + - service + properties: + displayName: + description: displayName is the display name of the plugin. + type: string + minLength: 1 + proxy: + description: proxy is a list of proxies that describe various service types to which the plugin needs to connect. + type: array + items: + description: ConsolePluginProxy holds information on various service types to which console's backend will proxy the plugin's requests. + type: object + required: + - alias + - type + properties: + alias: + description: "alias is a proxy name that identifies the plugin's proxy. An alias name should be unique per plugin. The console backend exposes the following proxy endpoint: \n /api/proxy/plugin/<plugin-name>/<proxy-alias>/<request-path>?<optional-query> \n Request example path: \n /api/proxy/plugin/acm/search/pods?namespace=openshift-apiserver" + type: string + maxLength: 128 + minLength: 1 + pattern: ^[A-Za-z0-9-_]+$ + authorize: + description: "authorize indicates if the proxied request should contain the logged-in user's OpenShift access token in the \"Authorization\" request header. For example: \n Authorization: Bearer sha256~kV46hPnEYhCWFnB85r5NrprAxggzgb6GOeLbgcKNsH0 \n By default the access token is not part of the proxied request." + type: boolean + default: false + caCertificate: + description: caCertificate provides the cert authority certificate contents, in case the proxied Service is using custom service CA. By default, the service CA bundle provided by the service-ca operator is used. + type: string + pattern: ^-----BEGIN CERTIFICATE-----([\s\S]*)-----END CERTIFICATE-----\s?$ + service: + description: 'service is an in-cluster Service that the plugin will connect to. The Service must use HTTPS. The console backend exposes an endpoint in order to proxy communication between the plugin and the Service. Note: service field is required for now, since currently only "Service" type is supported.' + type: object + required: + - name + - namespace + - port + properties: + name: + description: name of Service that the plugin needs to connect to. + type: string + maxLength: 128 + minLength: 1 + namespace: + description: namespace of Service that the plugin needs to connect to + type: string + maxLength: 128 + minLength: 1 + port: + description: port on which the Service that the plugin needs to connect to is listening on. + type: integer + format: int32 + maximum: 65535 + minimum: 1 + type: + description: type is the type of the console plugin's proxy. Currently only "Service" is supported. + type: string + pattern: ^(Service)$ + service: + description: service is a Kubernetes Service that exposes the plugin using a deployment with an HTTP server. The Service must use HTTPS and Service serving certificate.
The console backend will proxy the plugin's assets from the Service using the service CA bundle. + type: object + required: + - basePath + - name + - namespace + - port + properties: + basePath: + description: basePath is the path to the plugin's assets. The primary asset is the manifest file called `plugin-manifest.json`, which is a JSON document that contains metadata about the plugin and the extensions. + type: string + default: / + minLength: 1 + pattern: ^/ + name: + description: name of Service that is serving the plugin assets. + type: string + maxLength: 128 + minLength: 1 + namespace: + description: namespace of Service that is serving the plugin assets. + type: string + maxLength: 128 + minLength: 1 + port: + description: port on which the Service that is serving the plugin is listening to. + type: integer + format: int32 + maximum: 65535 + minimum: 1 + served: true + storage: true diff --git a/vendor/github.com/openshift/api/console/v1/Makefile b/vendor/github.com/openshift/api/console/v1/Makefile new file mode 100644 index 000000000..8c350e0a4 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="console.openshift.io/v1" diff --git a/vendor/github.com/openshift/api/console/v1/doc.go b/vendor/github.com/openshift/api/console/v1/doc.go new file mode 100644 index 000000000..c08b5b519 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/doc.go @@ -0,0 +1,7 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=console.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/console/v1/register.go b/vendor/github.com/openshift/api/console/v1/register.go new file mode 100644 index 000000000..22319469f --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/register.go @@ -0,0 +1,53 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "console.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// addKnownTypes adds types to API group +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &ConsoleCLIDownload{}, + &ConsoleCLIDownloadList{}, + &ConsoleExternalLogLink{}, + &ConsoleExternalLogLinkList{}, + &ConsoleLink{}, + &ConsoleLinkList{}, + &ConsoleNotification{}, + &ConsoleNotificationList{}, + &ConsolePlugin{}, + &ConsolePluginList{}, + &ConsoleQuickStart{}, + &ConsoleQuickStartList{}, + &ConsoleSample{}, + &ConsoleSampleList{}, + &ConsoleYAMLSample{}, + &ConsoleYAMLSampleList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git
a/vendor/github.com/openshift/api/console/v1/stable.consoleclidownload.testsuite.yaml b/vendor/github.com/openshift/api/console/v1/stable.consoleclidownload.testsuite.yaml new file mode 100644 index 000000000..8faef369b --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/stable.consoleclidownload.testsuite.yaml @@ -0,0 +1,20 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] ConsoleCLIDownload" +crd: 00_consoleclidownload.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal ConsoleCLIDownload + initial: | + apiVersion: console.openshift.io/v1 + kind: ConsoleCLIDownload + spec: + description: foo + displayName: foo + links: [] + expected: | + apiVersion: console.openshift.io/v1 + kind: ConsoleCLIDownload + spec: + description: foo + displayName: foo + links: [] diff --git a/vendor/github.com/openshift/api/console/v1/stable.consoleexternalloglink.testsuite.yaml b/vendor/github.com/openshift/api/console/v1/stable.consoleexternalloglink.testsuite.yaml new file mode 100644 index 000000000..8602d88b8 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/stable.consoleexternalloglink.testsuite.yaml @@ -0,0 +1,18 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] ConsoleExternalLogLink" +crd: 00_consoleexternalloglink.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal ConsoleExternalLogLink + initial: | + apiVersion: console.openshift.io/v1 + kind: ConsoleExternalLogLink + spec: + text: foo + hrefTemplate: "https://" + expected: | + apiVersion: console.openshift.io/v1 + kind: ConsoleExternalLogLink + spec: + text: foo + hrefTemplate: "https://" diff --git a/vendor/github.com/openshift/api/console/v1/stable.consolelink.testsuite.yaml b/vendor/github.com/openshift/api/console/v1/stable.consolelink.testsuite.yaml new file mode 100644 index 000000000..87415ec16 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/stable.consolelink.testsuite.yaml @@ -0,0 +1,20 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] ConsoleLink" +crd: 00_consolelink.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal ConsoleLink + initial: | + apiVersion: console.openshift.io/v1 + kind: ConsoleLink + spec: + href: "https://" + location: HelpMenu + text: foo + expected: | + apiVersion: console.openshift.io/v1 + kind: ConsoleLink + spec: + href: "https://" + location: HelpMenu + text: foo diff --git a/vendor/github.com/openshift/api/console/v1/stable.consolenotification.testsuite.yaml b/vendor/github.com/openshift/api/console/v1/stable.consolenotification.testsuite.yaml new file mode 100644 index 000000000..c60dd0a64 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/stable.consolenotification.testsuite.yaml @@ -0,0 +1,16 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] ConsoleNotification" +crd: 00_consolenotification.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal ConsoleNotification + initial: | + apiVersion: console.openshift.io/v1 + kind: ConsoleNotification + spec: + text: foo + expected: | + apiVersion: console.openshift.io/v1 + kind: ConsoleNotification + spec: + text: foo diff --git a/vendor/github.com/openshift/api/console/v1/stable.consoleplugin.testsuite.yaml 
b/vendor/github.com/openshift/api/console/v1/stable.consoleplugin.testsuite.yaml new file mode 100644 index 000000000..0abe23ba7 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/stable.consoleplugin.testsuite.yaml @@ -0,0 +1,88 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] ConsolePlugin" +crd: 90_consoleplugin.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal ConsolePlugin + initial: | + apiVersion: console.openshift.io/v1 + kind: ConsolePlugin + spec: + displayName: foo + backend: + type: Service + expected: | + apiVersion: console.openshift.io/v1 + kind: ConsolePlugin + spec: + displayName: foo + backend: + type: Service + - name: Should be able to create a ConsolePlugin with default i18n loadType + initial: | + apiVersion: console.openshift.io/v1 + kind: ConsolePlugin + spec: + displayName: foo + backend: + type: Service + i18n: + loadType: "" + expected: | + apiVersion: console.openshift.io/v1 + kind: ConsolePlugin + spec: + displayName: foo + backend: + type: Service + i18n: + loadType: "" + - name: Should be able to create a ConsolePlugin with Preload i18n loadType + initial: | + apiVersion: console.openshift.io/v1 + kind: ConsolePlugin + spec: + displayName: foo + backend: + type: Service + i18n: + loadType: Preload + expected: | + apiVersion: console.openshift.io/v1 + kind: ConsolePlugin + spec: + displayName: foo + backend: + type: Service + i18n: + loadType: Preload + - name: Should be able to create a ConsolePlugin with Lazy i18n loadType + initial: | + apiVersion: console.openshift.io/v1 + kind: ConsolePlugin + spec: + displayName: foo + backend: + type: Service + i18n: + loadType: Lazy + expected: | + apiVersion: console.openshift.io/v1 + kind: ConsolePlugin + spec: + displayName: foo + backend: + type: Service + i18n: + loadType: Lazy + - name: Should reject to create a ConsolePlugin with invalid i18n loadType + initial: | + apiVersion: console.openshift.io/v1 + kind: ConsolePlugin + spec: + displayName: foo + backend: + type: Service + i18n: + loadType: Invalid + expectedError: "Unsupported value: \"Invalid\"" diff --git a/vendor/github.com/openshift/api/console/v1/stable.consolequickstart.testsuite.yaml b/vendor/github.com/openshift/api/console/v1/stable.consolequickstart.testsuite.yaml new file mode 100644 index 000000000..d9c3ec93f --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/stable.consolequickstart.testsuite.yaml @@ -0,0 +1,28 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] ConsoleQuickStart" +crd: 00_consolequickstart.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal ConsoleQuickStart + initial: | + apiVersion: console.openshift.io/v1 + kind: ConsoleQuickStart + spec: + description: foo + displayName: foo + durationMinutes: 10 + introduction: foo + tasks: + - title: foo + description: foo + expected: | + apiVersion: console.openshift.io/v1 + kind: ConsoleQuickStart + spec: + description: foo + displayName: foo + durationMinutes: 10 + introduction: foo + tasks: + - title: foo + description: foo diff --git a/vendor/github.com/openshift/api/console/v1/stable.consolesample.testsuite.yaml b/vendor/github.com/openshift/api/console/v1/stable.consolesample.testsuite.yaml new file mode 100644 index 000000000..f5af74360 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/stable.consolesample.testsuite.yaml @@ -0,0 +1,183 @@ +apiVersion: 
apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] ConsoleSample" +crd: 00_consolesample.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal ConsoleSample with GitImport + initial: | + apiVersion: console.openshift.io/v1 + kind: ConsoleSample + spec: + title: Java with Maven test + abstract: Build and run Java applications using Maven and OpenJDK. + description: Build and run Java applications using Maven and OpenJDK. + source: + type: GitImport + gitImport: + repository: + url: https://github.com/jboss-openshift/openshift-quickstarts + expected: | + apiVersion: console.openshift.io/v1 + kind: ConsoleSample + spec: + title: Java with Maven test + abstract: Build and run Java applications using Maven and OpenJDK. + description: Build and run Java applications using Maven and OpenJDK. + source: + type: GitImport + gitImport: + repository: + url: https://github.com/jboss-openshift/openshift-quickstarts + service: + targetPort: 8080 + - name: Should be able to create a minimal ConsoleSample with ContainerImport + initial: | + apiVersion: console.openshift.io/v1 + kind: ConsoleSample + spec: + title: Base image test + abstract: Test the minimal Red Hat Universal Base Image (UBI). + description: Test the minimal Red Hat Universal Base Image (UBI). + source: + type: ContainerImport + containerImport: + image: registry.access.redhat.com/ubi8/ubi-minimal:8.8-860 + expected: | + apiVersion: console.openshift.io/v1 + kind: ConsoleSample + spec: + title: Base image test + abstract: Test the minimal Red Hat Universal Base Image (UBI). + description: Test the minimal Red Hat Universal Base Image (UBI). + source: + type: ContainerImport + containerImport: + image: registry.access.redhat.com/ubi8/ubi-minimal:8.8-860 + service: + targetPort: 8080 + - name: Should be able to create a full ConsoleSample with GitImport + initial: | + apiVersion: console.openshift.io/v1 + kind: ConsoleSample + spec: + title: Java with Maven test + abstract: Build and run Java applications using Maven and OpenJDK. + description: Build and run Java applications using Maven and OpenJDK. + icon: data:image;base64,base64 encoded image + type: Serverless function + provider: Red Hat + tags: + - java + - jboss + - openjdk + source: + type: GitImport + gitImport: + repository: + url: https://github.com/openshift-dev-console/nodejs-sample + revision: main + contextDir: /backend + service: + targetPort: 3000 + expected: | + apiVersion: console.openshift.io/v1 + kind: ConsoleSample + spec: + title: Java with Maven test + abstract: Build and run Java applications using Maven and OpenJDK. + description: Build and run Java applications using Maven and OpenJDK. + icon: data:image;base64,base64 encoded image + type: Serverless function + provider: Red Hat + tags: + - java + - jboss + - openjdk + source: + type: GitImport + gitImport: + repository: + url: https://github.com/openshift-dev-console/nodejs-sample + revision: main + contextDir: /backend + service: + targetPort: 3000 + - name: Should be able to create a full ConsoleSample with ContainerImport + initial: | + apiVersion: console.openshift.io/v1 + kind: ConsoleSample + spec: + title: Base image test + abstract: Test the minimal Red Hat Universal Base Image (UBI). + description: Test the minimal Red Hat Universal Base Image (UBI). 
+ icon: data:image;base64,base64 encoded image + type: Serverless function + provider: Red Hat + tags: + - java + - jboss + - openjdk + source: + type: ContainerImport + containerImport: + image: registry.access.redhat.com/ubi8/ubi-minimal:8.8-860 + service: + targetPort: 3000 + expected: | + apiVersion: console.openshift.io/v1 + kind: ConsoleSample + spec: + title: Base image test + abstract: Test the minimal Red Hat Universal Base Image (UBI). + description: Test the minimal Red Hat Universal Base Image (UBI). + icon: data:image;base64,base64 encoded image + type: Serverless function + provider: Red Hat + tags: + - java + - jboss + - openjdk + source: + type: ContainerImport + containerImport: + image: registry.access.redhat.com/ubi8/ubi-minimal:8.8-860 + service: + targetPort: 3000 + - name: Should decline a ConsoleSample when reusing a tag + initial: | + apiVersion: console.openshift.io/v1 + kind: ConsoleSample + spec: + title: Java with Maven test + abstract: Build and run Java applications using Maven and OpenJDK. + description: Build and run Java applications using Maven and OpenJDK. + icon: data:image;base64,base64 encoded image + type: Serverless function + provider: Red Hat + tags: + - same-tag-should-be-decline + - same-tag-should-be-decline + source: + type: GitImport + gitImport: + repository: + url: https://github.com/openshift-dev-console/nodejs-sample + expectedError: "spec.tags[1]: Duplicate value: \"same-tag-should-be-decline\"" + - name: Should decline a ConsoleSample with more than 10 tags + initial: | + apiVersion: console.openshift.io/v1 + kind: ConsoleSample + spec: + title: Java with Maven test + abstract: Build and run Java applications using Maven and OpenJDK. + description: Build and run Java applications using Maven and OpenJDK.
+ icon: data:image;base64,base64 encoded image + type: Serverless function + provider: Red Hat + tags: [tag1, tag2, tag3, tag4, tag5, tag6, tag7, tag8, tag9, tag10, tag11] + source: + type: GitImport + gitImport: + repository: + url: https://github.com/openshift-dev-console/nodejs-sample + expectedError: "spec.tags: Too many: 11: must have at most 10 items" diff --git a/vendor/github.com/openshift/api/console/v1/stable.consoleyamlsample.testsuite.yaml b/vendor/github.com/openshift/api/console/v1/stable.consoleyamlsample.testsuite.yaml new file mode 100644 index 000000000..1e72d5ac7 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/stable.consoleyamlsample.testsuite.yaml @@ -0,0 +1,26 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] ConsoleYAMLSample" +crd: 00_consoleyamlsample.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal ConsoleYAMLSample + initial: | + apiVersion: console.openshift.io/v1 + kind: ConsoleYAMLSample + spec: + description: foo + targetResource: + apiVersion: foo + kind: foo + title: foo + yaml: foo + expected: | + apiVersion: console.openshift.io/v1 + kind: ConsoleYAMLSample + spec: + description: foo + targetResource: + apiVersion: foo + kind: foo + title: foo + yaml: foo diff --git a/vendor/github.com/openshift/api/console/v1/types.go b/vendor/github.com/openshift/api/console/v1/types.go new file mode 100644 index 000000000..416eaa3e8 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/types.go @@ -0,0 +1,10 @@ +package v1 + +// Represents a standard link that could be generated in HTML +type Link struct { + // text is the display text for the link + Text string `json:"text"` + // href is the absolute secure URL for the link (must use https) + // +kubebuilder:validation:Pattern=`^https://` + Href string `json:"href"` +} diff --git a/vendor/github.com/openshift/api/console/v1/types_console_cli_download.go b/vendor/github.com/openshift/api/console/v1/types_console_cli_download.go new file mode 100644 index 000000000..0e77bedae --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/types_console_cli_download.go @@ -0,0 +1,54 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConsoleCLIDownload is an extension for configuring openshift web console command line interface (CLI) downloads. +// +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type ConsoleCLIDownload struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ConsoleCLIDownloadSpec `json:"spec"` +} + +// ConsoleCLIDownloadSpec is the desired cli download configuration. +type ConsoleCLIDownloadSpec struct { + // displayName is the display name of the CLI download. + DisplayName string `json:"displayName"` + // description is the description of the CLI download (can include markdown). + Description string `json:"description"` + // links is a list of objects that provide CLI download link details. 
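+ // For illustration, a minimal sketch of a links entry as it would appear in a
+ // ConsoleCLIDownload manifest; the text and href values below are placeholders,
+ // not part of the API:
+ //
+ //   links:
+ //   - text: Download example CLI for Linux
+ //     href: https://example.com/downloads/example-cli-linux-amd64.tar.gz
+ //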
+ Links []CLIDownloadLink `json:"links"` +} + +type CLIDownloadLink struct { + // text is the display text for the link + // +optional + Text string `json:"text"` + // href is the absolute secure URL for the link (must use https) + // +kubebuilder:validation:Pattern=`^https://` + Href string `json:"href"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type ConsoleCLIDownloadList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []ConsoleCLIDownload `json:"items"` +} diff --git a/vendor/github.com/openshift/api/console/v1/types_console_external_log_links.go b/vendor/github.com/openshift/api/console/v1/types_console_external_log_links.go new file mode 100644 index 000000000..6cf252af9 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/types_console_external_log_links.go @@ -0,0 +1,65 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConsoleExternalLogLink is an extension for customizing OpenShift web console log links. +// +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type ConsoleExternalLogLink struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ConsoleExternalLogLinkSpec `json:"spec"` +} + +// ConsoleExternalLogLinkSpec is the desired log link configuration. +// The log link will appear on the logs tab of the pod details page. +type ConsoleExternalLogLinkSpec struct { + // text is the display text for the link + Text string `json:"text"` + // hrefTemplate is an absolute secure URL (must use https) for the log link including + // variables to be replaced. Variables are specified in the URL with the format ${variableName}, + // for instance, ${containerName} and will be replaced with the corresponding values + // from the resource. The resource is a pod. + // Supported variables are: + // - ${resourceName} - name of the resource which contains the logs + // - ${resourceUID} - UID of the resource which contains the logs + // - e.g. `11111111-2222-3333-4444-555555555555` + // - ${containerName} - name of the resource's container that contains the logs + // - ${resourceNamespace} - namespace of the resource that contains the logs + // - ${resourceNamespaceUID} - namespace UID of the resource that contains the logs + // - ${podLabels} - JSON representation of labels matching the pod with the logs + // - e.g. `{"key1":"value1","key2":"value2"}` + // + // e.g., https://example.com/logs?resourceName=${resourceName}&containerName=${containerName}&resourceNamespace=${resourceNamespace}&podLabels=${podLabels} + // +kubebuilder:validation:Pattern=`^https://` + HrefTemplate string `json:"hrefTemplate"` + // namespaceFilter is a regular expression used to restrict a log link to a + // matching set of namespaces (e.g., `^openshift-`). The string is converted + // into a regular expression using the JavaScript RegExp constructor. + // If not specified, links will be displayed for all namespaces. + // +optional + NamespaceFilter string `json:"namespaceFilter,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type ConsoleExternalLogLinkList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []ConsoleExternalLogLink `json:"items"` +} diff --git a/vendor/github.com/openshift/api/console/v1/types_console_link.go b/vendor/github.com/openshift/api/console/v1/types_console_link.go new file mode 100644 index 000000000..8ba48907f --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/types_console_link.go @@ -0,0 +1,94 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConsoleLink is an extension for customizing OpenShift web console links. +// +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type ConsoleLink struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ConsoleLinkSpec `json:"spec"` +} + +// ConsoleLinkSpec is the desired console link configuration. +type ConsoleLinkSpec struct { + Link `json:",inline"` + // location determines which location in the console the link will be appended to (ApplicationMenu, HelpMenu, UserMenu, NamespaceDashboard). + Location ConsoleLinkLocation `json:"location"` + // applicationMenu holds information about section and icon used for the link in the + // application menu, and it is applicable only when location is set to ApplicationMenu. + // + // +optional + ApplicationMenu *ApplicationMenuSpec `json:"applicationMenu,omitempty"` + // namespaceDashboard holds information about namespaces in which the dashboard link should + // appear, and it is applicable only when location is set to NamespaceDashboard. + // If not specified, the link will appear in all namespaces. + // + // +optional + NamespaceDashboard *NamespaceDashboardSpec `json:"namespaceDashboard,omitempty"` +} + +// ApplicationMenuSpec is the specification of the desired section and icon used for the link in the application menu. +type ApplicationMenuSpec struct { + // section is the section of the application menu in which the link should appear. + // This can be any text that will appear as a subheading in the application menu dropdown. + // A new section will be created if the text does not match text of an existing section. + Section string `json:"section"` + // imageURL is the URL for the icon used in front of the link in the application menu. + // The URL must be an HTTPS URL or a Data URI. The image should be square and will be shown at 24x24 pixels.
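+ // For example, a hypothetical value (the icon content itself is elided):
+ //
+ //   imageURL: data:image/svg+xml;base64,<base64-encoded SVG>
+ //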
+ // +optional + ImageURL string `json:"imageURL,omitempty"` +} + +// NamespaceDashboardSpec is a specification of namespaces in which the dashboard link should appear. +// If both namespaces and namespaceSelector are specified, the link will appear in namespaces that match either. +type NamespaceDashboardSpec struct { + // namespaces is an array of namespace names in which the dashboard link should appear. + // + // +optional + Namespaces []string `json:"namespaces,omitempty"` + // namespaceSelector is used to select the Namespaces that should contain the dashboard link, by label. + // If the namespace labels match, the dashboard link will be shown for the namespaces. + // + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"` +} + +// ConsoleLinkLocation is a set of possible menu targets to which a link may be appended. +// +kubebuilder:validation:Pattern=`^(ApplicationMenu|HelpMenu|UserMenu|NamespaceDashboard)$` +type ConsoleLinkLocation string + +const ( + // HelpMenu indicates that the link should appear in the help menu in the console. + HelpMenu ConsoleLinkLocation = "HelpMenu" + // UserMenu indicates that the link should appear in the user menu in the console. + UserMenu ConsoleLinkLocation = "UserMenu" + // ApplicationMenu indicates that the link should appear inside the application menu of the console. + ApplicationMenu ConsoleLinkLocation = "ApplicationMenu" + // NamespaceDashboard indicates that the link should appear in the namespaced dashboard of the console. + NamespaceDashboard ConsoleLinkLocation = "NamespaceDashboard" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type ConsoleLinkList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []ConsoleLink `json:"items"` +} diff --git a/vendor/github.com/openshift/api/console/v1/types_console_notification.go b/vendor/github.com/openshift/api/console/v1/types_console_notification.go new file mode 100644 index 000000000..5408f1044 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/types_console_notification.go @@ -0,0 +1,68 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConsoleNotification is the extension for configuring openshift web console notifications. +// +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type ConsoleNotification struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec ConsoleNotificationSpec `json:"spec"` +} + +// ConsoleNotificationSpec is the desired console notification configuration. +type ConsoleNotificationSpec struct { + // text is the visible text of the notification. + Text string `json:"text"` + // location is the location of the notification in the console. + // Valid values are: "BannerTop", "BannerBottom", "BannerTopBottom". + // +optional + Location ConsoleNotificationLocation `json:"location,omitempty"` + // link is an object that holds notification link details. + // +optional + Link *Link `json:"link,omitempty"` + // color is the color of the text for the notification as CSS data type color. + // +optional + Color string `json:"color,omitempty"` + // backgroundColor is the color of the background for the notification as CSS data type color. + // +optional + BackgroundColor string `json:"backgroundColor,omitempty"` +} + +// ConsoleNotificationLocation is a set of possible notification targets +// to which a notification may be appended. +// +kubebuilder:validation:Pattern=`^(BannerTop|BannerBottom|BannerTopBottom)$` +type ConsoleNotificationLocation string + +const ( + // BannerTop indicates that the notification should appear at the top of the console. + BannerTop ConsoleNotificationLocation = "BannerTop" + // BannerBottom indicates that the notification should appear at the bottom of the console. + BannerBottom ConsoleNotificationLocation = "BannerBottom" + // BannerTopBottom indicates that the notification should appear both at the top and at the bottom of the console. + BannerTopBottom ConsoleNotificationLocation = "BannerTopBottom" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type ConsoleNotificationList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []ConsoleNotification `json:"items"` +} diff --git a/vendor/github.com/openshift/api/console/v1/types_console_plugin.go b/vendor/github.com/openshift/api/console/v1/types_console_plugin.go new file mode 100644 index 000000000..624ad73b1 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/types_console_plugin.go @@ -0,0 +1,244 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +openshift:compatibility-gen:level=1 + +// ConsolePlugin is an extension for customizing OpenShift web console by +// dynamically loading code from another service running on the cluster. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +type ConsolePlugin struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + // +kubebuilder:validation:Required + Spec ConsolePluginSpec `json:"spec"` +} + +// ConsolePluginSpec is the desired plugin configuration. +type ConsolePluginSpec struct { + // displayName is the display name of the plugin. + // The displayName should be between 1 and 128 characters. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=128 + DisplayName string `json:"displayName"` + // backend holds the configuration of the backend which is serving the console's plugin.
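+ //
+ // A minimal ConsolePlugin spec using this field might look like the following
+ // sketch; the service name, namespace and port are illustrative values only:
+ //
+ //   spec:
+ //     displayName: My Plugin
+ //     backend:
+ //       type: Service
+ //       service:
+ //         name: my-plugin-service
+ //         namespace: my-plugin-namespace
+ //         port: 9443
+ //         basePath: /
+ //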
+ // +kubebuilder:validation:Required + Backend ConsolePluginBackend `json:"backend"` + // proxy is a list of proxies that describe various service types + // to which the plugin needs to connect. + // +optional + Proxy []ConsolePluginProxy `json:"proxy,omitempty"` + // i18n is the configuration of plugin's localization resources. + // +optional + I18n ConsolePluginI18n `json:"i18n"` +} + +// LoadType is an enumeration of i18n loading types +// +kubebuilder:validation:Enum:=Preload;Lazy;"" +type LoadType string + +const ( + // Preload will load all plugin's localization resources during + // loading of the plugin. + Preload LoadType = "Preload" + // Lazy won't preload any plugin's localization resources; instead + // it will leave their loading to the runtime's lazy-loading. + Lazy LoadType = "Lazy" + // Empty is the default value of the LoadType field and its + // purpose is to improve discoverability of the field. The + // behaviour is equivalent to Lazy type. + Empty LoadType = "" +) + +// ConsolePluginI18n holds information on localization resources that are served by +// the dynamic plugin. +type ConsolePluginI18n struct { + // loadType indicates how the plugin's localization resource should be loaded. + // Valid values are Preload, Lazy and the empty string. + // When set to Preload, all localization resources are fetched when the plugin is loaded. + // When set to Lazy, localization resources are lazily loaded as and when they are required by the console. + // When omitted or set to the empty string, the behaviour is equivalent to Lazy type. + // +kubebuilder:validation:Required + LoadType LoadType `json:"loadType"` +} + +// ConsolePluginProxy holds information on various service types +// to which console's backend will proxy the plugin's requests. +type ConsolePluginProxy struct { + // endpoint provides information about the endpoint to which the request is proxied. + // +kubebuilder:validation:Required + Endpoint ConsolePluginProxyEndpoint `json:"endpoint"` + // alias is a proxy name that identifies the plugin's proxy. An alias name + // should be unique per plugin. The console backend exposes the following + // proxy endpoint: + // + // /api/proxy/plugin/<plugin-name>/<proxy-alias>/<request-path>?<optional-query-arguments> + // + // Request example path: + // + // /api/proxy/plugin/acm/search/pods?namespace=openshift-apiserver + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=128 + // +kubebuilder:validation:Pattern=`^[A-Za-z0-9-_]+$` + Alias string `json:"alias"` + // caCertificate provides the cert authority certificate contents, + // in case the proxied Service is using custom service CA. + // By default, the service CA bundle provided by the service-ca operator is used. + // +kubebuilder:validation:Pattern=`^-----BEGIN CERTIFICATE-----([\s\S]*)-----END CERTIFICATE-----\s?$` + // +optional + CACertificate string `json:"caCertificate,omitempty"` + // authorization provides information about the authorization type, + // which the proxied request should contain + // +kubebuilder:default:="None" + // +optional + Authorization AuthorizationType `json:"authorization,omitempty"` +} + +// ConsolePluginProxyEndpoint holds information about the endpoint to which +// the request will be proxied. +// +union +type ConsolePluginProxyEndpoint struct { + // type is the type of the console plugin's proxy. Currently only "Service" is supported. + // + // --- + // + When handling unknown values, consumers should report an error and stop processing the plugin. + // + // +kubebuilder:validation:Required + // +unionDiscriminator + Type ConsolePluginProxyType `json:"type"` + // service is an in-cluster Service that the plugin will connect to. + // The Service must use HTTPS. The console backend exposes an endpoint + // in order to proxy communication between the plugin and the Service. + // Note: service field is required for now, since currently only "Service" + // type is supported. + // +optional + Service *ConsolePluginProxyServiceConfig `json:"service,omitempty"` +} + +// ConsolePluginProxyType is an enumeration of available proxy types +// +kubebuilder:validation:Enum:=Service +type ConsolePluginProxyType string + +const ( + // ProxyTypeService is used when proxying communication to a Service + ProxyTypeService ConsolePluginProxyType = "Service" +) + +// AuthorizationType is an enumeration of available authorization types +// +kubebuilder:validation:Enum:=UserToken;None +type AuthorizationType string + +const ( + // UserToken indicates that the proxied request should contain the logged-in user's + // OpenShift access token in the "Authorization" request header. For example: + // + // Authorization: Bearer sha256~kV46hPnEYhCWFnB85r5NrprAxggzgb6GOeLbgcKNsH0 + // + UserToken AuthorizationType = "UserToken" + // None indicates that the proxied request won't contain authorization of any type. + None AuthorizationType = "None" +) + +// ConsolePluginProxyServiceConfig holds information on the Service to which +// console's backend will proxy the plugin's requests. +type ConsolePluginProxyServiceConfig struct { + // name of Service that the plugin needs to connect to. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=128 + Name string `json:"name"` + // namespace of Service that the plugin needs to connect to + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=128 + Namespace string `json:"namespace"` + // port on which the Service that the plugin needs to connect to + // is listening on. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Maximum:=65535 + // +kubebuilder:validation:Minimum:=1 + Port int32 `json:"port"` +} + +// ConsolePluginBackendType is an enumeration of available backend types +// +kubebuilder:validation:Enum:=Service +type ConsolePluginBackendType string + +const ( + // Service is used when plugin's backend is served by a Kubernetes Service + Service ConsolePluginBackendType = "Service" +) + +// ConsolePluginBackend holds information about the endpoint which serves +// the console's plugin +// +union
type ConsolePluginBackend struct { + // type is the backend type which serves the console's plugin. Currently only "Service" is supported. + // + // --- + // + When handling unknown values, consumers should report an error and stop processing the plugin. + // + // +kubebuilder:validation:Required + // +unionDiscriminator + Type ConsolePluginBackendType `json:"type"` + // service is a Kubernetes Service that exposes the plugin using a + // deployment with an HTTP server. The Service must use HTTPS and + // Service serving certificate. The console backend will proxy the + // plugin's assets from the Service using the service CA bundle. + // +optional + Service *ConsolePluginService `json:"service"` +} + +// ConsolePluginService holds information on Service that is serving +// console dynamic plugin assets. +type ConsolePluginService struct { + // name of Service that is serving the plugin assets.
+ // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=128 + Name string `json:"name"` + // namespace of Service that is serving the plugin assets. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=128 + Namespace string `json:"namespace"` + // port on which the Service that is serving the plugin is listening to. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Maximum:=65535 + // +kubebuilder:validation:Minimum:=1 + Port int32 `json:"port"` + // basePath is the path to the plugin's assets. The primary asset is the + // manifest file called `plugin-manifest.json`, which is a JSON document + // that contains metadata about the plugin and the extensions. + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +kubebuilder:validation:Pattern=`^[a-zA-Z0-9.\-_~!$&'()*+,;=:@\/]*$` + // +kubebuilder:default:="/" + // +optional + BasePath string `json:"basePath"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +openshift:compatibility-gen:level=1 + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +type ConsolePluginList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []ConsolePlugin `json:"items"` +} diff --git a/vendor/github.com/openshift/api/console/v1/types_console_quick_start.go b/vendor/github.com/openshift/api/console/v1/types_console_quick_start.go new file mode 100644 index 000000000..613dfa903 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/types_console_quick_start.go @@ -0,0 +1,143 @@ +package v1 + +import ( + authorizationv1 "k8s.io/api/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConsoleQuickStart is an extension for guiding users through various +// workflows in the OpenShift web console. +// +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type ConsoleQuickStart struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // +kubebuilder:validation:Required + // +required + Spec ConsoleQuickStartSpec `json:"spec"` +} + +// ConsoleQuickStartSpec is the desired quick start configuration. +type ConsoleQuickStartSpec struct { + // displayName is the display name of the Quick Start. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +required + DisplayName string `json:"displayName"` + // icon is a base64 encoded image that will be displayed beside the Quick Start display name. + // The icon should be a vector image for easy scaling. The size of the icon should be 40x40. + // +optional + Icon string `json:"icon,omitempty"` + // tags is a list of strings that describe the Quick Start.
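+ // For example (illustrative values only):
+ //
+ //   tags:
+ //   - monitoring
+ //   - observability
+ //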
+ // +optional + Tags []string `json:"tags,omitempty"` + // durationMinutes describes approximately how many minutes it will take to complete the Quick Start. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Minimum=1 + // +required + DurationMinutes int `json:"durationMinutes"` + // description is the description of the Quick Start. (includes markdown) + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +required + Description string `json:"description"` + // prerequisites contains all prerequisites that need to be met before taking a Quick Start. (includes markdown) + // +optional + Prerequisites []string `json:"prerequisites,omitempty"` + // introduction describes the purpose of the Quick Start. (includes markdown) + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +required + Introduction string `json:"introduction"` + // tasks is the list of steps the user has to perform to complete the Quick Start. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinItems=1 + // +required + Tasks []ConsoleQuickStartTask `json:"tasks"` + // conclusion sums up the Quick Start and suggests the possible next steps. (includes markdown) + // +optional + Conclusion string `json:"conclusion,omitempty"` + // nextQuickStart is a list of the following Quick Starts, suggested for the user to try. + // +optional + NextQuickStart []string `json:"nextQuickStart,omitempty"` + // accessReviewResources contains a list of resources that the user's access + // will be reviewed against in order for the user to complete the Quick Start. + // The Quick Start will be hidden if any of the access reviews fail. + // +optional + AccessReviewResources []authorizationv1.ResourceAttributes `json:"accessReviewResources,omitempty"` +} + +// ConsoleQuickStartTask is a single step in a Quick Start. +type ConsoleQuickStartTask struct { + // title describes the task and is displayed as a step heading. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +required + Title string `json:"title"` + // description describes the steps needed to complete the task. (includes markdown) + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +required + Description string `json:"description"` + // review contains instructions to validate the task is complete. The user will select 'Yes' or 'No' + // using a radio button, which indicates whether the step was completed successfully. + // +optional + Review *ConsoleQuickStartTaskReview `json:"review,omitempty"` + // summary contains information about the passed step. + // +optional + Summary *ConsoleQuickStartTaskSummary `json:"summary,omitempty"` +} + +// ConsoleQuickStartTaskReview contains instructions that validate a task was completed successfully. +type ConsoleQuickStartTaskReview struct { + // instructions contains steps that the user needs to take in order + // to validate their work after going through a task. (includes markdown) + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +required + Instructions string `json:"instructions"` + // failedTaskHelp contains suggestions for a failed task review and is shown at the end of the task. (includes markdown) + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +required + FailedTaskHelp string `json:"failedTaskHelp"` +} + +// ConsoleQuickStartTaskSummary contains information about a passed step. +type ConsoleQuickStartTaskSummary struct { + // success describes the successfully passed task. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +required + Success string `json:"success"` + // failed briefly describes the unsuccessfully passed task. (includes markdown) + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=128 + // +required + Failed string `json:"failed"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type ConsoleQuickStartList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []ConsoleQuickStart `json:"items"` +} diff --git a/vendor/github.com/openshift/api/console/v1/types_console_sample.go b/vendor/github.com/openshift/api/console/v1/types_console_sample.go new file mode 100644 index 000000000..7edb4ba9e --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/types_console_sample.go @@ -0,0 +1,266 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConsoleSample is an extension for customizing OpenShift web console by adding samples. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ConsoleSample struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + // spec contains configuration for a console sample. + // +kubebuilder:validation:Required + Spec ConsoleSampleSpec `json:"spec"` +} + +// ConsoleSampleSpec is the desired sample for the web console. +// Samples will appear with their title, descriptions and a badge in a samples catalog. +type ConsoleSampleSpec struct { + // title is the display name of the sample. + // + // It is required and must be no more than 50 characters in length. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=50 + Title string `json:"title"` + + // abstract is a short introduction to the sample. + // + // It is required and must be no more than 100 characters in length. + // + // The abstract is shown on the sample card tile below the title and provider + // and is limited to three lines of content. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MaxLength=100 + Abstract string `json:"abstract"` + + // description is a long form explanation of the sample. + // + // It is required and can have a maximum length of **4096** characters. + // + // It is a README.md-like content for additional information, links, pre-conditions, and other instructions.
+ // It will be rendered as Markdown so that it can contain line breaks, links, and other simple formatting. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MaxLength=4096 + Description string `json:"description"` + + // icon is an optional base64 encoded image and is shown beside the sample title. + // + // The format must follow the data: URL format and can have a maximum size of **10 KB**. + // + // data:[<mediatype>][;base64],<data> + // + // For example: + // + // data:image;base64, plus the base64 encoded image. + // + // Vector images can also be used. SVG icons must start with: + // + // data:image/svg+xml;base64, plus the base64 encoded SVG image. + // + // All sample catalog icons will be shown on a white background (also when the dark theme is used). + // The web console ensures that different aspect ratios work correctly. + // Currently, the surface of the icon is at most 40x100px. + // + // For more information on the data URL format, please visit + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs. + // +optional + // +kubebuilder:validation:Pattern=`^data:([a-z/\.+0-9]*;(([-a-zA-Z0-9=])*;)?)?base64,` + // +kubebuilder:validation:MaxLength=14000 + Icon string `json:"icon"` + + // type is an optional label to group multiple samples. + // + // It is optional and must be no more than 20 characters in length. + // + // Recommendation is a singular term like "Builder Image", "Devfile" or "Serverless Function". + // + // Currently, the type is shown as a badge on the sample card tile in the top right corner. + // +optional + // +kubebuilder:validation:MaxLength=20 + Type string `json:"type"` + + // provider is an optional label to honor who provides the sample. + // + // It is optional and must be no more than 50 characters in length. + // + // A provider can be a company like "Red Hat" or an organization like "CNCF" or "Knative". + // + // Currently, the provider is only shown on the sample card tile below the title with the prefix "Provided by " + // +optional + // +kubebuilder:validation:MaxLength=50 + Provider string `json:"provider"` + + // tags are optional string values that can be used to find samples in the samples catalog. + // + // Examples of common tags may be "Java", "Quarkus", etc. + // + // They will be displayed on the samples details page. + // +optional + // +listType=set + // +kubebuilder:validation:MaxItems:=10 + Tags []string `json:"tags"` + + // source defines where to deploy the sample service from. + // The sample may be sourced from an external git repository or container image. + // +kubebuilder:validation:Required + Source ConsoleSampleSource `json:"source"` +} + +// ConsoleSampleSourceType is an enumeration of the supported sample types. +// Unsupported sample types will be ignored in the web console. +// +kubebuilder:validation:Enum:=GitImport;ContainerImport +type ConsoleSampleSourceType string + +const ( + // A sample that lets the user import code from a git repository. + GitImport ConsoleSampleSourceType = "GitImport" + // A sample that lets the user import a container image. + ContainerImport ConsoleSampleSourceType = "ContainerImport" +) + +// ConsoleSampleSource is the actual sample definition and can hold different sample types. +// Unsupported sample types will be ignored in the web console. +// +union +// +kubebuilder:validation:XValidation:rule="self.type == 'GitImport' ? has(self.gitImport) : !has(self.gitImport)",message="source.gitImport is required when source.type is GitImport, and forbidden otherwise" +// +kubebuilder:validation:XValidation:rule="self.type == 'ContainerImport' ? has(self.containerImport) : !has(self.containerImport)",message="source.containerImport is required when source.type is ContainerImport, and forbidden otherwise" +type ConsoleSampleSource struct { + // type of the sample, currently supported: "GitImport";"ContainerImport" + // +unionDiscriminator + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum:="GitImport";"ContainerImport" + Type ConsoleSampleSourceType `json:"type"` + + // gitImport allows the user to import code from a git repository. + // +unionMember + // +optional + GitImport *ConsoleSampleGitImportSource `json:"gitImport,omitempty"` + + // containerImport allows the user to import a container image. + // +unionMember + // +optional + ContainerImport *ConsoleSampleContainerImportSource `json:"containerImport,omitempty"` +} + +// ConsoleSampleGitImportSource lets the user import code from a public Git repository. +type ConsoleSampleGitImportSource struct { + // repository contains the reference to the actual Git repository. + // +kubebuilder:validation:Required + Repository ConsoleSampleGitImportSourceRepository `json:"repository"` + // service contains configuration for the Service resource created for this sample. + // +optional + // +kubebuilder:default={"targetPort": 8080} + // +default:={"targetPort": 8080} + Service ConsoleSampleGitImportSourceService `json:"service"` +} + +// ConsoleSampleGitImportSourceRepository lets the user import code from a public git repository. +type ConsoleSampleGitImportSourceRepository struct { + // url of the Git repository that contains an HTTP service. + // The HTTP service must be exposed on the default port (8080) unless + // otherwise configured with the port field. + // + // Only public repositories on GitHub, GitLab and Bitbucket are currently supported: + // + // - https://github.com/<org>/<repository> + // - https://gitlab.com/<org>/<repository> + // - https://bitbucket.org/<org>/<repository> + // + // The url must have a maximum length of 256 characters. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + // +kubebuilder:validation:Pattern=`^https:\/\/(github.com|gitlab.com|bitbucket.org)\/[a-zA-Z0-9-]+\/[a-zA-Z0-9-]+(.git)?$` + URL string `json:"url"` + // revision is the git revision at which to clone the git repository. + // Can be used to clone a specific branch, tag or commit SHA. + // Must be at most 256 characters in length. + // When omitted the repository's default branch is used. + // +optional + // +kubebuilder:validation:MaxLength=256 + Revision string `json:"revision"` + // contextDir is used to specify a directory within the repository to build the + // component. + // Must start with `/` and have a maximum length of 256 characters. + // When omitted, the default value is to build from the root of the repository. + // +optional + // +kubebuilder:validation:MaxLength=256 + // +kubebuilder:validation:Pattern=`^/` + ContextDir string `json:"contextDir"` +} + +// ConsoleSampleGitImportSourceService lets the samples author define defaults +// for the Service created for this sample. +type ConsoleSampleGitImportSourceService struct { + // targetPort is the port that the service listens on for HTTP requests. + // This port will be used for the Service created for this sample. + // Port must be in the range 1 to 65535.
+ // Default port is 8080. + // +optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // +kubebuilder:default=8080 + // +default:=8080 + TargetPort int32 `json:"targetPort,omitempty"` +} + +// ConsoleSampleContainerImportSource lets the user import a container image. +type ConsoleSampleContainerImportSource struct { + // reference to a container image that provides an HTTP service. + // The service must be exposed on the default port (8080) unless + // otherwise configured with the port field. + // + // Supported formats: + // - <repository-name>/<image-name> + // - docker.io/<repository-name>/<image-name> + // - quay.io/<repository-name>/<image-name> + // - quay.io/<repository-name>/<image-name>@sha256:<image hash> + // - quay.io/<repository-name>/<image-name>:<tag> + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=256 + Image string `json:"image"` + // service contains configuration for the Service resource created for this sample. + // +optional + // +kubebuilder:default={"targetPort": 8080} + // +default:={"targetPort": 8080} + Service ConsoleSampleContainerImportSourceService `json:"service"` +} + +// ConsoleSampleContainerImportSourceService lets the samples author define defaults +// for the Service created for this sample. +type ConsoleSampleContainerImportSourceService struct { + // targetPort is the port that the service listens on for HTTP requests. + // This port will be used for the Service and Route created for this sample. + // Port must be in the range 1 to 65535. + // Default port is 8080. + // +optional + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // +kubebuilder:default=8080 + // +default:=8080 + TargetPort int32 `json:"targetPort,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ConsoleSampleList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []ConsoleSample `json:"items"` +} diff --git a/vendor/github.com/openshift/api/console/v1/types_console_yaml_sample.go b/vendor/github.com/openshift/api/console/v1/types_console_yaml_sample.go new file mode 100644 index 000000000..8888b5162 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/types_console_yaml_sample.go @@ -0,0 +1,67 @@ +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConsoleYAMLSample is an extension for customizing OpenShift web console YAML samples. +// +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type ConsoleYAMLSample struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + Spec ConsoleYAMLSampleSpec `json:"spec"` +} + +// ConsoleYAMLSampleSpec is the desired YAML sample configuration. +// Samples will appear with their descriptions in a samples sidebar +// when creating a resource in the web console.
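+// An illustrative example (an editorial sketch, not part of the upstream API
+// documentation): a spec for a hypothetical Deployment sample could be
+// populated in Go as follows, where the title, description and YAML values
+// are placeholders:
+//
+//	spec := ConsoleYAMLSampleSpec{
+//		TargetResource: metav1.TypeMeta{APIVersion: "apps/v1", Kind: "Deployment"},
+//		Title:          ConsoleYAMLSampleTitle("Example Deployment"),
+//		Description:    ConsoleYAMLSampleDescription("A minimal Deployment sample."),
+//		YAML:           ConsoleYAMLSampleYAML("apiVersion: apps/v1\nkind: Deployment"),
+//	}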
+type ConsoleYAMLSampleSpec struct { + // targetResource contains apiVersion and kind of the resource + // YAML sample is representing. + TargetResource metav1.TypeMeta `json:"targetResource"` + // title of the YAML sample. + Title ConsoleYAMLSampleTitle `json:"title"` + // description of the YAML sample. + Description ConsoleYAMLSampleDescription `json:"description"` + // yaml is the YAML sample to display. + YAML ConsoleYAMLSampleYAML `json:"yaml"` + // snippet indicates that the YAML sample is not the full YAML resource + // definition, but a fragment that can be inserted into the existing + // YAML document at the user's cursor. + // +optional + Snippet bool `json:"snippet"` +} + +// ConsoleYAMLSampleTitle of the YAML sample. +// +kubebuilder:validation:Pattern=`^(.|\s)*\S(.|\s)*$` +type ConsoleYAMLSampleTitle string + +// ConsoleYAMLSampleDescription of the YAML sample. +// +kubebuilder:validation:Pattern=`^(.|\s)*\S(.|\s)*$` +type ConsoleYAMLSampleDescription string + +// ConsoleYAMLSampleYAML is the YAML sample to display. +// +kubebuilder:validation:Pattern=`^(.|\s)*\S(.|\s)*$` +type ConsoleYAMLSampleYAML string + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type ConsoleYAMLSampleList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []ConsoleYAMLSample `json:"items"` +} diff --git a/vendor/github.com/openshift/api/console/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/console/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..a268d697a --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/zz_generated.deepcopy.go @@ -0,0 +1,1032 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + authorizationv1 "k8s.io/api/authorization/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ApplicationMenuSpec) DeepCopyInto(out *ApplicationMenuSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationMenuSpec. +func (in *ApplicationMenuSpec) DeepCopy() *ApplicationMenuSpec { + if in == nil { + return nil + } + out := new(ApplicationMenuSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CLIDownloadLink) DeepCopyInto(out *CLIDownloadLink) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CLIDownloadLink. +func (in *CLIDownloadLink) DeepCopy() *CLIDownloadLink { + if in == nil { + return nil + } + out := new(CLIDownloadLink) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
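+// An illustrative usage sketch (editorial, not generated code): given a
+// populated download := &ConsoleCLIDownload{...}, callers can copy into an
+// existing value with DeepCopyInto, or use the DeepCopy/DeepCopyObject
+// wrappers defined below:
+//
+//	var out ConsoleCLIDownload
+//	download.DeepCopyInto(&out)                        // copy into an existing value
+//	copied := download.DeepCopy()                      // fresh, independent *ConsoleCLIDownload
+//	var obj runtime.Object = download.DeepCopyObject() // as a runtime.Object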
+func (in *ConsoleCLIDownload) DeepCopyInto(out *ConsoleCLIDownload) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleCLIDownload. +func (in *ConsoleCLIDownload) DeepCopy() *ConsoleCLIDownload { + if in == nil { + return nil + } + out := new(ConsoleCLIDownload) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsoleCLIDownload) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleCLIDownloadList) DeepCopyInto(out *ConsoleCLIDownloadList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConsoleCLIDownload, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleCLIDownloadList. +func (in *ConsoleCLIDownloadList) DeepCopy() *ConsoleCLIDownloadList { + if in == nil { + return nil + } + out := new(ConsoleCLIDownloadList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsoleCLIDownloadList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleCLIDownloadSpec) DeepCopyInto(out *ConsoleCLIDownloadSpec) { + *out = *in + if in.Links != nil { + in, out := &in.Links, &out.Links + *out = make([]CLIDownloadLink, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleCLIDownloadSpec. +func (in *ConsoleCLIDownloadSpec) DeepCopy() *ConsoleCLIDownloadSpec { + if in == nil { + return nil + } + out := new(ConsoleCLIDownloadSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleExternalLogLink) DeepCopyInto(out *ConsoleExternalLogLink) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleExternalLogLink. +func (in *ConsoleExternalLogLink) DeepCopy() *ConsoleExternalLogLink { + if in == nil { + return nil + } + out := new(ConsoleExternalLogLink) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsoleExternalLogLink) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConsoleExternalLogLinkList) DeepCopyInto(out *ConsoleExternalLogLinkList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConsoleExternalLogLink, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleExternalLogLinkList. +func (in *ConsoleExternalLogLinkList) DeepCopy() *ConsoleExternalLogLinkList { + if in == nil { + return nil + } + out := new(ConsoleExternalLogLinkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsoleExternalLogLinkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleExternalLogLinkSpec) DeepCopyInto(out *ConsoleExternalLogLinkSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleExternalLogLinkSpec. +func (in *ConsoleExternalLogLinkSpec) DeepCopy() *ConsoleExternalLogLinkSpec { + if in == nil { + return nil + } + out := new(ConsoleExternalLogLinkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleLink) DeepCopyInto(out *ConsoleLink) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleLink. +func (in *ConsoleLink) DeepCopy() *ConsoleLink { + if in == nil { + return nil + } + out := new(ConsoleLink) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsoleLink) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleLinkList) DeepCopyInto(out *ConsoleLinkList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConsoleLink, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleLinkList. +func (in *ConsoleLinkList) DeepCopy() *ConsoleLinkList { + if in == nil { + return nil + } + out := new(ConsoleLinkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsoleLinkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConsoleLinkSpec) DeepCopyInto(out *ConsoleLinkSpec) { + *out = *in + out.Link = in.Link + if in.ApplicationMenu != nil { + in, out := &in.ApplicationMenu, &out.ApplicationMenu + *out = new(ApplicationMenuSpec) + **out = **in + } + if in.NamespaceDashboard != nil { + in, out := &in.NamespaceDashboard, &out.NamespaceDashboard + *out = new(NamespaceDashboardSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleLinkSpec. +func (in *ConsoleLinkSpec) DeepCopy() *ConsoleLinkSpec { + if in == nil { + return nil + } + out := new(ConsoleLinkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleNotification) DeepCopyInto(out *ConsoleNotification) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleNotification. +func (in *ConsoleNotification) DeepCopy() *ConsoleNotification { + if in == nil { + return nil + } + out := new(ConsoleNotification) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsoleNotification) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleNotificationList) DeepCopyInto(out *ConsoleNotificationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConsoleNotification, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleNotificationList. +func (in *ConsoleNotificationList) DeepCopy() *ConsoleNotificationList { + if in == nil { + return nil + } + out := new(ConsoleNotificationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsoleNotificationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleNotificationSpec) DeepCopyInto(out *ConsoleNotificationSpec) { + *out = *in + if in.Link != nil { + in, out := &in.Link, &out.Link + *out = new(Link) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleNotificationSpec. +func (in *ConsoleNotificationSpec) DeepCopy() *ConsoleNotificationSpec { + if in == nil { + return nil + } + out := new(ConsoleNotificationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConsolePlugin) DeepCopyInto(out *ConsolePlugin) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsolePlugin. +func (in *ConsolePlugin) DeepCopy() *ConsolePlugin { + if in == nil { + return nil + } + out := new(ConsolePlugin) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsolePlugin) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsolePluginBackend) DeepCopyInto(out *ConsolePluginBackend) { + *out = *in + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ConsolePluginService) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsolePluginBackend. +func (in *ConsolePluginBackend) DeepCopy() *ConsolePluginBackend { + if in == nil { + return nil + } + out := new(ConsolePluginBackend) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsolePluginI18n) DeepCopyInto(out *ConsolePluginI18n) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsolePluginI18n. +func (in *ConsolePluginI18n) DeepCopy() *ConsolePluginI18n { + if in == nil { + return nil + } + out := new(ConsolePluginI18n) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsolePluginList) DeepCopyInto(out *ConsolePluginList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConsolePlugin, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsolePluginList. +func (in *ConsolePluginList) DeepCopy() *ConsolePluginList { + if in == nil { + return nil + } + out := new(ConsolePluginList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsolePluginList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsolePluginProxy) DeepCopyInto(out *ConsolePluginProxy) { + *out = *in + in.Endpoint.DeepCopyInto(&out.Endpoint) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsolePluginProxy. +func (in *ConsolePluginProxy) DeepCopy() *ConsolePluginProxy { + if in == nil { + return nil + } + out := new(ConsolePluginProxy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConsolePluginProxyEndpoint) DeepCopyInto(out *ConsolePluginProxyEndpoint) { + *out = *in + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ConsolePluginProxyServiceConfig) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsolePluginProxyEndpoint. +func (in *ConsolePluginProxyEndpoint) DeepCopy() *ConsolePluginProxyEndpoint { + if in == nil { + return nil + } + out := new(ConsolePluginProxyEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsolePluginProxyServiceConfig) DeepCopyInto(out *ConsolePluginProxyServiceConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsolePluginProxyServiceConfig. +func (in *ConsolePluginProxyServiceConfig) DeepCopy() *ConsolePluginProxyServiceConfig { + if in == nil { + return nil + } + out := new(ConsolePluginProxyServiceConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsolePluginService) DeepCopyInto(out *ConsolePluginService) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsolePluginService. +func (in *ConsolePluginService) DeepCopy() *ConsolePluginService { + if in == nil { + return nil + } + out := new(ConsolePluginService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsolePluginSpec) DeepCopyInto(out *ConsolePluginSpec) { + *out = *in + in.Backend.DeepCopyInto(&out.Backend) + if in.Proxy != nil { + in, out := &in.Proxy, &out.Proxy + *out = make([]ConsolePluginProxy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.I18n = in.I18n + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsolePluginSpec. +func (in *ConsolePluginSpec) DeepCopy() *ConsolePluginSpec { + if in == nil { + return nil + } + out := new(ConsolePluginSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleQuickStart) DeepCopyInto(out *ConsoleQuickStart) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleQuickStart. +func (in *ConsoleQuickStart) DeepCopy() *ConsoleQuickStart { + if in == nil { + return nil + } + out := new(ConsoleQuickStart) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsoleQuickStart) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConsoleQuickStartList) DeepCopyInto(out *ConsoleQuickStartList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConsoleQuickStart, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleQuickStartList. +func (in *ConsoleQuickStartList) DeepCopy() *ConsoleQuickStartList { + if in == nil { + return nil + } + out := new(ConsoleQuickStartList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsoleQuickStartList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleQuickStartSpec) DeepCopyInto(out *ConsoleQuickStartSpec) { + *out = *in + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Prerequisites != nil { + in, out := &in.Prerequisites, &out.Prerequisites + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Tasks != nil { + in, out := &in.Tasks, &out.Tasks + *out = make([]ConsoleQuickStartTask, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NextQuickStart != nil { + in, out := &in.NextQuickStart, &out.NextQuickStart + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AccessReviewResources != nil { + in, out := &in.AccessReviewResources, &out.AccessReviewResources + *out = make([]authorizationv1.ResourceAttributes, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleQuickStartSpec. +func (in *ConsoleQuickStartSpec) DeepCopy() *ConsoleQuickStartSpec { + if in == nil { + return nil + } + out := new(ConsoleQuickStartSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleQuickStartTask) DeepCopyInto(out *ConsoleQuickStartTask) { + *out = *in + if in.Review != nil { + in, out := &in.Review, &out.Review + *out = new(ConsoleQuickStartTaskReview) + **out = **in + } + if in.Summary != nil { + in, out := &in.Summary, &out.Summary + *out = new(ConsoleQuickStartTaskSummary) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleQuickStartTask. +func (in *ConsoleQuickStartTask) DeepCopy() *ConsoleQuickStartTask { + if in == nil { + return nil + } + out := new(ConsoleQuickStartTask) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleQuickStartTaskReview) DeepCopyInto(out *ConsoleQuickStartTaskReview) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleQuickStartTaskReview. 
+func (in *ConsoleQuickStartTaskReview) DeepCopy() *ConsoleQuickStartTaskReview { + if in == nil { + return nil + } + out := new(ConsoleQuickStartTaskReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleQuickStartTaskSummary) DeepCopyInto(out *ConsoleQuickStartTaskSummary) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleQuickStartTaskSummary. +func (in *ConsoleQuickStartTaskSummary) DeepCopy() *ConsoleQuickStartTaskSummary { + if in == nil { + return nil + } + out := new(ConsoleQuickStartTaskSummary) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleSample) DeepCopyInto(out *ConsoleSample) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleSample. +func (in *ConsoleSample) DeepCopy() *ConsoleSample { + if in == nil { + return nil + } + out := new(ConsoleSample) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsoleSample) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleSampleContainerImportSource) DeepCopyInto(out *ConsoleSampleContainerImportSource) { + *out = *in + out.Service = in.Service + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleSampleContainerImportSource. +func (in *ConsoleSampleContainerImportSource) DeepCopy() *ConsoleSampleContainerImportSource { + if in == nil { + return nil + } + out := new(ConsoleSampleContainerImportSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleSampleContainerImportSourceService) DeepCopyInto(out *ConsoleSampleContainerImportSourceService) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleSampleContainerImportSourceService. +func (in *ConsoleSampleContainerImportSourceService) DeepCopy() *ConsoleSampleContainerImportSourceService { + if in == nil { + return nil + } + out := new(ConsoleSampleContainerImportSourceService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleSampleGitImportSource) DeepCopyInto(out *ConsoleSampleGitImportSource) { + *out = *in + out.Repository = in.Repository + out.Service = in.Service + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleSampleGitImportSource. +func (in *ConsoleSampleGitImportSource) DeepCopy() *ConsoleSampleGitImportSource { + if in == nil { + return nil + } + out := new(ConsoleSampleGitImportSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConsoleSampleGitImportSourceRepository) DeepCopyInto(out *ConsoleSampleGitImportSourceRepository) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleSampleGitImportSourceRepository. +func (in *ConsoleSampleGitImportSourceRepository) DeepCopy() *ConsoleSampleGitImportSourceRepository { + if in == nil { + return nil + } + out := new(ConsoleSampleGitImportSourceRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleSampleGitImportSourceService) DeepCopyInto(out *ConsoleSampleGitImportSourceService) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleSampleGitImportSourceService. +func (in *ConsoleSampleGitImportSourceService) DeepCopy() *ConsoleSampleGitImportSourceService { + if in == nil { + return nil + } + out := new(ConsoleSampleGitImportSourceService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleSampleList) DeepCopyInto(out *ConsoleSampleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConsoleSample, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleSampleList. +func (in *ConsoleSampleList) DeepCopy() *ConsoleSampleList { + if in == nil { + return nil + } + out := new(ConsoleSampleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsoleSampleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleSampleSource) DeepCopyInto(out *ConsoleSampleSource) { + *out = *in + if in.GitImport != nil { + in, out := &in.GitImport, &out.GitImport + *out = new(ConsoleSampleGitImportSource) + **out = **in + } + if in.ContainerImport != nil { + in, out := &in.ContainerImport, &out.ContainerImport + *out = new(ConsoleSampleContainerImportSource) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleSampleSource. +func (in *ConsoleSampleSource) DeepCopy() *ConsoleSampleSource { + if in == nil { + return nil + } + out := new(ConsoleSampleSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleSampleSpec) DeepCopyInto(out *ConsoleSampleSpec) { + *out = *in + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.Source.DeepCopyInto(&out.Source) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleSampleSpec. 
+func (in *ConsoleSampleSpec) DeepCopy() *ConsoleSampleSpec { + if in == nil { + return nil + } + out := new(ConsoleSampleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleYAMLSample) DeepCopyInto(out *ConsoleYAMLSample) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleYAMLSample. +func (in *ConsoleYAMLSample) DeepCopy() *ConsoleYAMLSample { + if in == nil { + return nil + } + out := new(ConsoleYAMLSample) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsoleYAMLSample) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleYAMLSampleList) DeepCopyInto(out *ConsoleYAMLSampleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConsoleYAMLSample, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleYAMLSampleList. +func (in *ConsoleYAMLSampleList) DeepCopy() *ConsoleYAMLSampleList { + if in == nil { + return nil + } + out := new(ConsoleYAMLSampleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsoleYAMLSampleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsoleYAMLSampleSpec) DeepCopyInto(out *ConsoleYAMLSampleSpec) { + *out = *in + out.TargetResource = in.TargetResource + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsoleYAMLSampleSpec. +func (in *ConsoleYAMLSampleSpec) DeepCopy() *ConsoleYAMLSampleSpec { + if in == nil { + return nil + } + out := new(ConsoleYAMLSampleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Link) DeepCopyInto(out *Link) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Link. +func (in *Link) DeepCopy() *Link { + if in == nil { + return nil + } + out := new(Link) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NamespaceDashboardSpec) DeepCopyInto(out *NamespaceDashboardSpec) { + *out = *in + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceDashboardSpec. +func (in *NamespaceDashboardSpec) DeepCopy() *NamespaceDashboardSpec { + if in == nil { + return nil + } + out := new(NamespaceDashboardSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/console/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/console/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..c6f2070fa --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,460 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Link = map[string]string{ + "": "Represents a standard link that could be generated in HTML", + "text": "text is the display text for the link", + "href": "href is the absolute secure URL for the link (must use https)", +} + +func (Link) SwaggerDoc() map[string]string { + return map_Link +} + +var map_CLIDownloadLink = map[string]string{ + "text": "text is the display text for the link", + "href": "href is the absolute secure URL for the link (must use https)", +} + +func (CLIDownloadLink) SwaggerDoc() map[string]string { + return map_CLIDownloadLink +} + +var map_ConsoleCLIDownload = map[string]string{ + "": "ConsoleCLIDownload is an extension for configuring openshift web console command line interface (CLI) downloads.\n\nCompatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ConsoleCLIDownload) SwaggerDoc() map[string]string { + return map_ConsoleCLIDownload +} + +var map_ConsoleCLIDownloadList = map[string]string{ + "": "Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ConsoleCLIDownloadList) SwaggerDoc() map[string]string { + return map_ConsoleCLIDownloadList +} + +var map_ConsoleCLIDownloadSpec = map[string]string{ + "": "ConsoleCLIDownloadSpec is the desired cli download configuration.", + "displayName": "displayName is the display name of the CLI download.", + "description": "description is the description of the CLI download (can include markdown).", + "links": "links is a list of objects that provide CLI download link details.", +} + +func (ConsoleCLIDownloadSpec) SwaggerDoc() map[string]string { + return map_ConsoleCLIDownloadSpec +} + +var map_ConsoleExternalLogLink = map[string]string{ + "": "ConsoleExternalLogLink is an extension for customizing OpenShift web console log links.\n\nCompatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ConsoleExternalLogLink) SwaggerDoc() map[string]string { + return map_ConsoleExternalLogLink +} + +var map_ConsoleExternalLogLinkList = map[string]string{ + "": "Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ConsoleExternalLogLinkList) SwaggerDoc() map[string]string { + return map_ConsoleExternalLogLinkList +} + +var map_ConsoleExternalLogLinkSpec = map[string]string{ + "": "ConsoleExternalLogLinkSpec is the desired log link configuration. The log link will appear on the logs tab of the pod details page.", + "text": "text is the display text for the link", + "hrefTemplate": "hrefTemplate is an absolute secure URL (must use https) for the log link including variables to be replaced. Variables are specified in the URL with the format ${variableName}, for instance, ${containerName} and will be replaced with the corresponding values from the resource. Resource is a pod. Supported variables are: - ${resourceName} - name of the resource which contains the logs - ${resourceUID} - UID of the resource which contains the logs\n - e.g. `11111111-2222-3333-4444-555555555555`\n- ${containerName} - name of the resource's container that contains the logs - ${resourceNamespace} - namespace of the resource that contains the logs - ${resourceNamespaceUID} - namespace UID of the resource that contains the logs - ${podLabels} - JSON representation of labels matching the pod with the logs\n - e.g. `{\"key1\":\"value1\",\"key2\":\"value2\"}`\n\ne.g., https://example.com/logs?resourceName=${resourceName}&containerName=${containerName}&resourceNamespace=${resourceNamespace}&podLabels=${podLabels}", + "namespaceFilter": "namespaceFilter is a regular expression used to restrict a log link to a matching set of namespaces (e.g., `^openshift-`). The string is converted into a regular expression using the JavaScript RegExp constructor.
If not specified, links will be displayed for all the namespaces.", +} + +func (ConsoleExternalLogLinkSpec) SwaggerDoc() map[string]string { + return map_ConsoleExternalLogLinkSpec +} + +var map_ApplicationMenuSpec = map[string]string{ + "": "ApplicationMenuSpec is the specification of the desired section and icon used for the link in the application menu.", + "section": "section is the section of the application menu in which the link should appear. This can be any text that will appear as a subheading in the application menu dropdown. A new section will be created if the text does not match text of an existing section.", + "imageURL": "imageURL is the URL for the icon used in front of the link in the application menu. The URL must be an HTTPS URL or a Data URI. The image should be square and will be shown at 24x24 pixels.", +} + +func (ApplicationMenuSpec) SwaggerDoc() map[string]string { + return map_ApplicationMenuSpec +} + +var map_ConsoleLink = map[string]string{ + "": "ConsoleLink is an extension for customizing OpenShift web console links.\n\nCompatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ConsoleLink) SwaggerDoc() map[string]string { + return map_ConsoleLink +} + +var map_ConsoleLinkList = map[string]string{ + "": "Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ConsoleLinkList) SwaggerDoc() map[string]string { + return map_ConsoleLinkList +} + +var map_ConsoleLinkSpec = map[string]string{ + "": "ConsoleLinkSpec is the desired console link configuration.", + "location": "location determines which location in the console the link will be appended to (ApplicationMenu, HelpMenu, UserMenu, NamespaceDashboard).", + "applicationMenu": "applicationMenu holds information about section and icon used for the link in the application menu, and it is applicable only when location is set to ApplicationMenu.", + "namespaceDashboard": "namespaceDashboard holds information about namespaces in which the dashboard link should appear, and it is applicable only when location is set to NamespaceDashboard. If not specified, the link will appear in all namespaces.", +} + +func (ConsoleLinkSpec) SwaggerDoc() map[string]string { + return map_ConsoleLinkSpec +} + +var map_NamespaceDashboardSpec = map[string]string{ + "": "NamespaceDashboardSpec is a specification of namespaces in which the dashboard link should appear. If both namespaces and namespaceSelector are specified, the link will appear in namespaces that match either of them.", + "namespaces": "namespaces is an array of namespace names in which the dashboard link should appear.", + "namespaceSelector": "namespaceSelector is used to select the Namespaces that should contain the dashboard link by label.
If the namespace labels match, the dashboard link will be shown for the namespaces.", +} + +func (NamespaceDashboardSpec) SwaggerDoc() map[string]string { + return map_NamespaceDashboardSpec +} + +var map_ConsoleNotification = map[string]string{ + "": "ConsoleNotification is the extension for configuring openshift web console notifications.\n\nCompatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ConsoleNotification) SwaggerDoc() map[string]string { + return map_ConsoleNotification +} + +var map_ConsoleNotificationList = map[string]string{ + "": "Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ConsoleNotificationList) SwaggerDoc() map[string]string { + return map_ConsoleNotificationList +} + +var map_ConsoleNotificationSpec = map[string]string{ + "": "ConsoleNotificationSpec is the desired console notification configuration.", + "text": "text is the visible text of the notification.", + "location": "location is the location of the notification in the console. Valid values are: \"BannerTop\", \"BannerBottom\", \"BannerTopBottom\".", + "link": "link is an object that holds notification link details.", + "color": "color is the color of the text for the notification as CSS data type color.", + "backgroundColor": "backgroundColor is the color of the background for the notification as CSS data type color.", +} + +func (ConsoleNotificationSpec) SwaggerDoc() map[string]string { + return map_ConsoleNotificationSpec +} + +var map_ConsolePlugin = map[string]string{ + "": "ConsolePlugin is an extension for customizing OpenShift web console by dynamically loading code from another service running on the cluster.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ConsolePlugin) SwaggerDoc() map[string]string { + return map_ConsolePlugin +} + +var map_ConsolePluginBackend = map[string]string{ + "": "ConsolePluginBackend holds information about the endpoint which serves the console's plugin", + "type": "type is the backend type which serves the console's plugin. Currently only \"Service\" is supported.", + "service": "service is a Kubernetes Service that exposes the plugin using a deployment with an HTTP server. The Service must use HTTPS and Service serving certificate. The console backend will proxy the plugin's assets from the Service using the service CA bundle.", +} + +func (ConsolePluginBackend) SwaggerDoc() map[string]string { + return map_ConsolePluginBackend +} + +var map_ConsolePluginI18n = map[string]string{ + "": "ConsolePluginI18n holds information on localization resources that are served by the dynamic plugin.", + "loadType": "loadType indicates how the plugin's localization resource should be loaded. Valid values are Preload, Lazy and the empty string. When set to Preload, all localization resources are fetched when the plugin is loaded.
When set to Lazy, localization resources are lazily loaded as and when they are required by the console. When omitted or set to the empty string, the behaviour is equivalent to the Lazy type.", +} + +func (ConsolePluginI18n) SwaggerDoc() map[string]string { + return map_ConsolePluginI18n +} + +var map_ConsolePluginList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ConsolePluginList) SwaggerDoc() map[string]string { + return map_ConsolePluginList +} + +var map_ConsolePluginProxy = map[string]string{ + "": "ConsolePluginProxy holds information on various service types to which console's backend will proxy the plugin's requests.", + "endpoint": "endpoint provides information about the endpoint to which the request is proxied.", + "alias": "alias is a proxy name that identifies the plugin's proxy. An alias name should be unique per plugin. The console backend exposes the following proxy endpoint:\n\n/api/proxy/plugin/<plugin-name>/<proxy-alias>/<request-path>?<optional-query-parameters>\n\nRequest example path:\n\n/api/proxy/plugin/acm/search/pods?namespace=openshift-apiserver", + "caCertificate": "caCertificate provides the cert authority certificate contents, in case the proxied Service is using a custom service CA. By default, the service CA bundle provided by the service-ca operator is used.", + "authorization": "authorization provides information about the authorization type, which the proxied request should contain.", +} + +func (ConsolePluginProxy) SwaggerDoc() map[string]string { + return map_ConsolePluginProxy +} + +var map_ConsolePluginProxyEndpoint = map[string]string{ + "": "ConsolePluginProxyEndpoint holds information about the endpoint to which the request will be proxied.", + "type": "type is the type of the console plugin's proxy. Currently only \"Service\" is supported.", + "service": "service is an in-cluster Service that the plugin will connect to. The Service must use HTTPS. The console backend exposes an endpoint in order to proxy communication between the plugin and the Service. Note: service field is required for now, since currently only \"Service\" type is supported.", +} + +func (ConsolePluginProxyEndpoint) SwaggerDoc() map[string]string { + return map_ConsolePluginProxyEndpoint +} + +var map_ConsolePluginProxyServiceConfig = map[string]string{ + "": "ProxyTypeServiceConfig holds information on Service to which console's backend will proxy the plugin's requests.", + "name": "name of Service that the plugin needs to connect to.", + "namespace": "namespace of Service that the plugin needs to connect to.", + "port": "port on which the Service that the plugin needs to connect to is listening.", +} + +func (ConsolePluginProxyServiceConfig) SwaggerDoc() map[string]string { + return map_ConsolePluginProxyServiceConfig +} + +var map_ConsolePluginService = map[string]string{ + "": "ConsolePluginService holds information on Service that is serving console dynamic plugin assets.", + "name": "name of Service that is serving the plugin assets.", + "namespace": "namespace of Service that is serving the plugin assets.", + "port": "port on which the Service that is serving the plugin is listening.", + "basePath": "basePath is the path to the plugin's assets.
The primary asset is the manifest file called `plugin-manifest.json`, which is a JSON document that contains metadata about the plugin and the extensions.", +} + +func (ConsolePluginService) SwaggerDoc() map[string]string { + return map_ConsolePluginService +} + +var map_ConsolePluginSpec = map[string]string{ + "": "ConsolePluginSpec is the desired plugin configuration.", + "displayName": "displayName is the display name of the plugin. The displayName should be between 1 and 128 characters.", + "backend": "backend holds the configuration of the backend which serves the console's plugin.", + "proxy": "proxy is a list of proxies that describe the various service types to which the plugin needs to connect.", + "i18n": "i18n is the configuration of plugin's localization resources.", +} + +func (ConsolePluginSpec) SwaggerDoc() map[string]string { + return map_ConsolePluginSpec +} + +var map_ConsoleQuickStart = map[string]string{ + "": "ConsoleQuickStart is an extension for guiding the user through various workflows in the OpenShift web console.\n\nCompatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ConsoleQuickStart) SwaggerDoc() map[string]string { + return map_ConsoleQuickStart +} + +var map_ConsoleQuickStartList = map[string]string{ + "": "Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ConsoleQuickStartList) SwaggerDoc() map[string]string { + return map_ConsoleQuickStartList +} + +var map_ConsoleQuickStartSpec = map[string]string{ + "": "ConsoleQuickStartSpec is the desired quick start configuration.", + "displayName": "displayName is the display name of the Quick Start.", + "icon": "icon is a base64 encoded image that will be displayed beside the Quick Start display name. The icon should be a vector image for easy scaling. The size of the icon should be 40x40.", + "tags": "tags is a list of strings that describe the Quick Start.", + "durationMinutes": "durationMinutes describes approximately how many minutes it will take to complete the Quick Start.", + "description": "description is the description of the Quick Start. (includes markdown)", + "prerequisites": "prerequisites contains all prerequisites that need to be met before taking a Quick Start. (includes markdown)", + "introduction": "introduction describes the purpose of the Quick Start. (includes markdown)", + "tasks": "tasks is the list of steps the user has to perform to complete the Quick Start.", + "conclusion": "conclusion sums up the Quick Start and suggests the possible next steps. (includes markdown)", + "nextQuickStart": "nextQuickStart is a list of the following Quick Starts, suggested for the user to try.", + "accessReviewResources": "accessReviewResources contains a list of resources that the user's access will be reviewed against in order for the user to complete the Quick Start.
The Quick Start will be hidden if any of the access reviews fail.", +} + +func (ConsoleQuickStartSpec) SwaggerDoc() map[string]string { + return map_ConsoleQuickStartSpec +} + +var map_ConsoleQuickStartTask = map[string]string{ + "": "ConsoleQuickStartTask is a single step in a Quick Start.", + "title": "title describes the task and is displayed as a step heading.", + "description": "description describes the steps needed to complete the task. (includes markdown)", + "review": "review contains instructions to validate the task is complete. The user will select 'Yes' or 'No' using a radio button, which indicates whether the step was completed successfully.", + "summary": "summary contains information about the passed step.", +} + +func (ConsoleQuickStartTask) SwaggerDoc() map[string]string { + return map_ConsoleQuickStartTask +} + +var map_ConsoleQuickStartTaskReview = map[string]string{ + "": "ConsoleQuickStartTaskReview contains instructions that validate a task was completed successfully.", + "instructions": "instructions contains steps that the user needs to take in order to validate their work after going through a task. (includes markdown)", + "failedTaskHelp": "failedTaskHelp contains suggestions for a failed task review and is shown at the end of the task. (includes markdown)", +} + +func (ConsoleQuickStartTaskReview) SwaggerDoc() map[string]string { + return map_ConsoleQuickStartTaskReview +} + +var map_ConsoleQuickStartTaskSummary = map[string]string{ + "": "ConsoleQuickStartTaskSummary contains information about a passed step.", + "success": "success describes the successfully passed task.", + "failed": "failed briefly describes the unsuccessfully passed task. (includes markdown)", +} + +func (ConsoleQuickStartTaskSummary) SwaggerDoc() map[string]string { + return map_ConsoleQuickStartTaskSummary +} + +var map_ConsoleSample = map[string]string{ + "": "ConsoleSample is an extension for customizing the OpenShift web console by adding samples.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec contains configuration for a console sample.", +} + +func (ConsoleSample) SwaggerDoc() map[string]string { + return map_ConsoleSample +} + +var map_ConsoleSampleContainerImportSource = map[string]string{ + "": "ConsoleSampleContainerImportSource lets the user import a container image.", + "image": "reference to a container image that provides an HTTP service. The service must be exposed on the default port (8080) unless otherwise configured with the port field.\n\nSupported formats:\n - <repository-name>/<image-name>\n - docker.io/<repository-name>/<image-name>\n - quay.io/<repository-name>/<image-name>\n - quay.io/<repository-name>/<image-name>@sha256:<image hash>\n - quay.io/<repository-name>/<image-name>:<tag>", + "service": "service contains configuration for the Service resource created for this sample.", +} + +func (ConsoleSampleContainerImportSource) SwaggerDoc() map[string]string { + return map_ConsoleSampleContainerImportSource +} + +var map_ConsoleSampleContainerImportSourceService = map[string]string{ + "": "ConsoleSampleContainerImportSourceService lets the samples author define defaults for the Service created for this sample.", + "targetPort": "targetPort is the port that the service listens on for HTTP requests. This port will be used for the Service and Route created for this sample. Port must be in the range 1 to 65535.
Default port is 8080.", +} + +func (ConsoleSampleContainerImportSourceService) SwaggerDoc() map[string]string { + return map_ConsoleSampleContainerImportSourceService +} + +var map_ConsoleSampleGitImportSource = map[string]string{ + "": "ConsoleSampleGitImportSource lets the user import code from a public Git repository.", + "repository": "repository contains the reference to the actual Git repository.", + "service": "service contains configuration for the Service resource created for this sample.", +} + +func (ConsoleSampleGitImportSource) SwaggerDoc() map[string]string { + return map_ConsoleSampleGitImportSource +} + +var map_ConsoleSampleGitImportSourceRepository = map[string]string{ + "": "ConsoleSampleGitImportSourceRepository lets the user import code from a public git repository.", + "url": "url of the Git repository that contains an HTTP service. The HTTP service must be exposed on the default port (8080) unless otherwise configured with the port field.\n\nOnly public repositories on GitHub, GitLab and Bitbucket are currently supported:\n\n - https://github.com/<org>/<repository>\n - https://gitlab.com/<org>/<repository>\n - https://bitbucket.org/<org>/<repository>\n\nThe url must have a maximum length of 256 characters.", + "revision": "revision is the git revision at which to clone the git repository. Can be used to clone a specific branch, tag or commit SHA. Must be at most 256 characters in length. When omitted the repository's default branch is used.", + "contextDir": "contextDir is used to specify a directory within the repository to build the component. Must start with `/` and have a maximum length of 256 characters. When omitted, the default value is to build from the root of the repository.", +} + +func (ConsoleSampleGitImportSourceRepository) SwaggerDoc() map[string]string { + return map_ConsoleSampleGitImportSourceRepository +} + +var map_ConsoleSampleGitImportSourceService = map[string]string{ + "": "ConsoleSampleGitImportSourceService lets the samples author define defaults for the Service created for this sample.", + "targetPort": "targetPort is the port that the service listens on for HTTP requests. This port will be used for the Service created for this sample. Port must be in the range 1 to 65535. Default port is 8080.", +} + +func (ConsoleSampleGitImportSourceService) SwaggerDoc() map[string]string { + return map_ConsoleSampleGitImportSourceService +} + +var map_ConsoleSampleList = map[string]string{ + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ConsoleSampleList) SwaggerDoc() map[string]string { + return map_ConsoleSampleList +} + +var map_ConsoleSampleSource = map[string]string{ + "": "ConsoleSampleSource is the actual sample definition and can hold different sample types. Unsupported sample types will be ignored in the web console.", + "type": "type of the sample, currently supported: \"GitImport\";\"ContainerImport\"", + "gitImport": "gitImport allows the user to import code from a git repository.", + "containerImport": "containerImport allows the user to import a container image.", +} + +func (ConsoleSampleSource) SwaggerDoc() map[string]string { + return map_ConsoleSampleSource +} + +var map_ConsoleSampleSpec = map[string]string{ + "": "ConsoleSampleSpec is the desired sample for the web console.
Samples will appear with their title, descriptions and a badge in a samples catalog.", + "title": "title is the display name of the sample.\n\nIt is required and must be no more than 50 characters in length.", + "abstract": "abstract is a short introduction to the sample.\n\nIt is required and must be no more than 100 characters in length.\n\nThe abstract is shown on the sample card tile below the title and provider and is limited to three lines of content.", + "description": "description is a long form explanation of the sample.\n\nIt is required and can have a maximum length of **4096** characters.\n\nIt is a README.md-like content for additional information, links, pre-conditions, and other instructions. It will be rendered as Markdown so that it can contain line breaks, links, and other simple formatting.", + "icon": "icon is an optional base64 encoded image and shown beside the sample title.\n\nThe format must follow the data: URL format and can have a maximum size of **10 KB**.\n\n data:[<mediatype>][;base64],<data>\n\nFor example:\n\n data:image;base64, plus the base64 encoded image.\n\nVector images can also be used. SVG icons must start with:\n\n data:image/svg+xml;base64, plus the base64 encoded SVG image.\n\nAll sample catalog icons will be shown on a white background (also when the dark theme is used). The web console ensures that different aspect ratios work correctly. Currently, the surface of the icon is at most 40x100px.\n\nFor more information on the data URL format, please visit https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URLs.", + "type": "type is an optional label to group multiple samples.\n\nIt is optional and must be no more than 20 characters in length.\n\nRecommendation is a singular term like \"Builder Image\", \"Devfile\" or \"Serverless Function\".\n\nCurrently, the type is shown as a badge on the sample card tile in the top right corner.", + "provider": "provider is an optional label to honor who provides the sample.\n\nIt is optional and must be no more than 50 characters in length.\n\nA provider can be a company like \"Red Hat\" or an organization like \"CNCF\" or \"Knative\".\n\nCurrently, the provider is only shown on the sample card tile below the title with the prefix \"Provided by \"", + "tags": "tags are optional string values that can be used to find samples in the samples catalog.\n\nExamples of common tags may be \"Java\", \"Quarkus\", etc.\n\nThey will be displayed on the samples details page.", + "source": "source defines where to deploy the sample service from. The sample may be sourced from an external git repository or container image.", +} + +func (ConsoleSampleSpec) SwaggerDoc() map[string]string { + return map_ConsoleSampleSpec +} + +var map_ConsoleYAMLSample = map[string]string{ + "": "ConsoleYAMLSample is an extension for customizing OpenShift web console YAML samples.\n\nCompatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ConsoleYAMLSample) SwaggerDoc() map[string]string { + return map_ConsoleYAMLSample +} + +var map_ConsoleYAMLSampleList = map[string]string{ + "": "Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ConsoleYAMLSampleList) SwaggerDoc() map[string]string { + return map_ConsoleYAMLSampleList +} + +var map_ConsoleYAMLSampleSpec = map[string]string{ + "": "ConsoleYAMLSampleSpec is the desired YAML sample configuration. Samples will appear with their descriptions in a samples sidebar when creating resources in the web console.", + "targetResource": "targetResource contains apiVersion and kind of the resource the YAML sample is representing.", + "title": "title of the YAML sample.", + "description": "description of the YAML sample.", + "yaml": "yaml is the YAML sample to display.", + "snippet": "snippet indicates that the YAML sample is not the full YAML resource definition, but a fragment that can be inserted into the existing YAML document at the user's cursor.", +} + +func (ConsoleYAMLSampleSpec) SwaggerDoc() map[string]string { + return map_ConsoleYAMLSampleSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/console/v1alpha1/90_consoleplugin.crd.yaml b/vendor/github.com/openshift/api/console/v1alpha1/90_consoleplugin.crd.yaml new file mode 100644 index 000000000..52034c96d --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1alpha1/90_consoleplugin.crd.yaml @@ -0,0 +1,294 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/764 + capability.openshift.io/name: Console + description: Extension for configuring openshift web console plugins. + displayName: ConsolePlugin + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + service.beta.openshift.io/inject-cabundle: "true" + name: consoleplugins.console.openshift.io +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + name: webhook + namespace: openshift-console-operator + path: /crdconvert + port: 9443 + conversionReviewVersions: + - v1 + - v1alpha1 + group: console.openshift.io + names: + kind: ConsolePlugin + listKind: ConsolePluginList + plural: consoleplugins + singular: consoleplugin + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "ConsolePlugin is an extension for customizing OpenShift web console by dynamically loading code from another service running on the cluster. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ConsolePluginSpec is the desired plugin configuration.
+ type: object + required: + - backend + - displayName + properties: + backend: + description: backend holds the configuration of the backend which is serving the console's plugin. + type: object + required: + - type + properties: + service: + description: service is a Kubernetes Service that exposes the plugin using a deployment with an HTTP server. The Service must use HTTPS and Service serving certificate. The console backend will proxy the plugin's assets from the Service using the service CA bundle. + type: object + required: + - name + - namespace + - port + properties: + basePath: + description: basePath is the path to the plugin's assets. The primary asset is the manifest file called `plugin-manifest.json`, which is a JSON document that contains metadata about the plugin and the extensions. + type: string + default: / + maxLength: 256 + minLength: 1 + pattern: ^[a-zA-Z0-9.\-_~!$&'()*+,;=:@\/]*$ + name: + description: name of Service that is serving the plugin assets. + type: string + maxLength: 128 + minLength: 1 + namespace: + description: namespace of Service that is serving the plugin assets. + type: string + maxLength: 128 + minLength: 1 + port: + description: port on which the Service that is serving the plugin is listening to. + type: integer + format: int32 + maximum: 65535 + minimum: 1 + type: + description: "type is the backend type which serves the console's plugin. Currently only \"Service\" is supported. \n ---" + type: string + enum: + - Service + displayName: + description: displayName is the display name of the plugin. The displayName should be between 1 and 128 characters. + type: string + maxLength: 128 + minLength: 1 + i18n: + description: i18n is the configuration of the plugin's localization resources. + type: object + required: + - loadType + properties: + loadType: + description: loadType indicates how the plugin's localization resource should be loaded. Valid values are Preload, Lazy and the empty string. When set to Preload, all localization resources are fetched when the plugin is loaded. When set to Lazy, localization resources are lazily loaded as and when they are required by the console. When omitted or set to the empty string, the behaviour is equivalent to Lazy type. + type: string + enum: + - Preload + - Lazy + - "" + proxy: + description: proxy is a list of proxies that describe the various service types to which the plugin needs to connect. + type: array + items: + description: ConsolePluginProxy holds information on various service types to which console's backend will proxy the plugin's requests. + type: object + required: + - alias + - endpoint + properties: + alias: + description: "alias is a proxy name that identifies the plugin's proxy. An alias name should be unique per plugin. The console backend exposes the following proxy endpoint: \n /api/proxy/plugin/<plugin-name>/<proxy-alias>/<request-path>?<optional-query-arguments> \n Request example path: \n /api/proxy/plugin/acm/search/pods?namespace=openshift-apiserver" + type: string + maxLength: 128 + minLength: 1 + pattern: ^[A-Za-z0-9-_]+$ + authorization: + description: authorization provides information about the authorization type, which the proxied request should contain + type: string + default: None + enum: + - UserToken + - None + caCertificate: + description: caCertificate provides the cert authority certificate contents, in case the proxied Service is using custom service CA. By default, the service CA bundle provided by the service-ca operator is used.
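As an editorial aside, to make the v1 schema above concrete, here is a minimal sketch of a conforming ConsolePlugin resource (the proxy endpoint block it uses is detailed just below). All names are invented for illustration and are not part of this patch; it satisfies the required backend and displayName fields and shows one proxy entry using the UserToken authorization type:

    apiVersion: console.openshift.io/v1
    kind: ConsolePlugin
    metadata:
      name: example-plugin                # illustrative name
    spec:
      displayName: Example Plugin         # 1-128 characters
      backend:
        type: Service                     # only "Service" is supported
        service:
          name: example-plugin            # Service serving the plugin assets
          namespace: example-plugin-ns
          port: 9443
          basePath: /                     # defaulted to / when omitted
      proxy:
        - alias: example-api              # must be unique per plugin
          authorization: UserToken        # or None (the default)
          endpoint:
            type: Service
            service:
              name: example-plugin-api
              namespace: example-plugin-ns
              port: 8443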
+ type: string + pattern: ^-----BEGIN CERTIFICATE-----([\s\S]*)-----END CERTIFICATE-----\s?$ + endpoint: + description: endpoint provides information about the endpoint to which the request is proxied. + type: object + required: + - type + properties: + service: + description: 'service is an in-cluster Service that the plugin will connect to. The Service must use HTTPS. The console backend exposes an endpoint in order to proxy communication between the plugin and the Service. Note: service field is required for now, since currently only "Service" type is supported.' + type: object + required: + - name + - namespace + - port + properties: + name: + description: name of Service that the plugin needs to connect to. + type: string + maxLength: 128 + minLength: 1 + namespace: + description: namespace of Service that the plugin needs to connect to. + type: string + maxLength: 128 + minLength: 1 + port: + description: port on which the Service that the plugin needs to connect to is listening on. + type: integer + format: int32 + maximum: 65535 + minimum: 1 + type: + description: "type is the type of the console plugin's proxy. Currently only \"Service\" is supported. \n ---" + type: string + enum: + - Service + served: true + storage: false + - name: v1alpha1 + schema: + openAPIV3Schema: + description: "ConsolePlugin is an extension for customizing OpenShift web console by dynamically loading code from another service running on the cluster. \n Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support." + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ConsolePluginSpec is the desired plugin configuration. + type: object + required: + - service + properties: + displayName: + description: displayName is the display name of the plugin. + type: string + minLength: 1 + proxy: + description: proxy is a list of proxies that describe the various service types to which the plugin needs to connect. + type: array + items: + description: ConsolePluginProxy holds information on various service types to which console's backend will proxy the plugin's requests. + type: object + required: + - alias + - type + properties: + alias: + description: "alias is a proxy name that identifies the plugin's proxy. An alias name should be unique per plugin. The console backend exposes the following proxy endpoint: \n /api/proxy/plugin/<plugin-name>/<proxy-alias>/<request-path>?<optional-query-arguments> \n Request example path: \n /api/proxy/plugin/acm/search/pods?namespace=openshift-apiserver" + type: string + maxLength: 128 + minLength: 1 + pattern: ^[A-Za-z0-9-_]+$ + authorize: + description: "authorize indicates if the proxied request should contain the logged-in user's OpenShift access token in the \"Authorization\" request header.
For example: \n Authorization: Bearer sha256~kV46hPnEYhCWFnB85r5NrprAxggzgb6GOeLbgcKNsH0 \n By default the access token is not part of the proxied request." + type: boolean + default: false + caCertificate: + description: caCertificate provides the cert authority certificate contents, in case the proxied Service is using custom service CA. By default, the service CA bundle provided by the service-ca operator is used. + type: string + pattern: ^-----BEGIN CERTIFICATE-----([\s\S]*)-----END CERTIFICATE-----\s?$ + service: + description: 'service is an in-cluster Service that the plugin will connect to. The Service must use HTTPS. The console backend exposes an endpoint in order to proxy communication between the plugin and the Service. Note: service field is required for now, since currently only "Service" type is supported.' + type: object + required: + - name + - namespace + - port + properties: + name: + description: name of Service that the plugin needs to connect to. + type: string + maxLength: 128 + minLength: 1 + namespace: + description: namespace of Service that the plugin needs to connect to. + type: string + maxLength: 128 + minLength: 1 + port: + description: port on which the Service that the plugin needs to connect to is listening on. + type: integer + format: int32 + maximum: 65535 + minimum: 1 + type: + description: type is the type of the console plugin's proxy. Currently only "Service" is supported. + type: string + pattern: ^(Service)$ + service: + description: service is a Kubernetes Service that exposes the plugin using a deployment with an HTTP server. The Service must use HTTPS and Service serving certificate. The console backend will proxy the plugin's assets from the Service using the service CA bundle. + type: object + required: + - basePath + - name + - namespace + - port + properties: + basePath: + description: basePath is the path to the plugin's assets. The primary asset is the manifest file called `plugin-manifest.json`, which is a JSON document that contains metadata about the plugin and the extensions. + type: string + default: / + minLength: 1 + pattern: ^/ + name: + description: name of Service that is serving the plugin assets. + type: string + maxLength: 128 + minLength: 1 + namespace: + description: namespace of Service that is serving the plugin assets. + type: string + maxLength: 128 + minLength: 1 + port: + description: port on which the Service that is serving the plugin is listening to.
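For comparison, here is a minimal sketch of the same plugin expressed against the deprecated v1alpha1 schema above (again with invented, illustrative names). Unlike v1, v1alpha1 has no backend wrapper: the serving Service sits directly on the spec, the proxy Service sits directly on the proxy entry, and authorization is a plain authorize boolean instead of the v1 authorization enum. The conversion webhook configured at the top of this CRD translates between the two versions:

    apiVersion: console.openshift.io/v1alpha1
    kind: ConsolePlugin
    metadata:
      name: example-plugin
    spec:
      displayName: Example Plugin
      service:                        # serving Service, flat on the spec
        name: example-plugin
        namespace: example-plugin-ns
        port: 9443
        basePath: /                   # required in v1alpha1, defaulted to /
      proxy:
        - type: Service
          alias: example-api
          authorize: true             # send the user's access token
          service:
            name: example-plugin-api
            namespace: example-plugin-ns
            port: 8443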
+ type: integer + format: int32 + maximum: 65535 + minimum: 1 + served: true + storage: true diff --git a/vendor/github.com/openshift/api/console/v1alpha1/Makefile b/vendor/github.com/openshift/api/console/v1alpha1/Makefile new file mode 100644 index 000000000..022487a15 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1alpha1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="console.openshift.io/v1alpha1" diff --git a/vendor/github.com/openshift/api/console/v1alpha1/doc.go b/vendor/github.com/openshift/api/console/v1alpha1/doc.go new file mode 100644 index 000000000..67ac59bc1 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1alpha1/doc.go @@ -0,0 +1,6 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=console.openshift.io +package v1alpha1 diff --git a/vendor/github.com/openshift/api/console/v1alpha1/register.go b/vendor/github.com/openshift/api/console/v1alpha1/register.go new file mode 100644 index 000000000..a21f00803 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1alpha1/register.go @@ -0,0 +1,39 @@ +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "console.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// addKnownTypes adds types to API group +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &ConsolePlugin{}, + &ConsolePluginList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/console/v1alpha1/stable.consoleplugin.testsuite.yaml b/vendor/github.com/openshift/api/console/v1alpha1/stable.consoleplugin.testsuite.yaml new file mode 100644 index 000000000..d861a6543 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1alpha1/stable.consoleplugin.testsuite.yaml @@ -0,0 +1,23 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] ConsolePlugin" +crd: 90_consoleplugin.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal ConsolePlugin + initial: | + apiVersion: console.openshift.io/v1alpha1 + kind: ConsolePlugin + spec: + service: + name: foo + namespace: foo + port: 80 + expected: | + apiVersion: console.openshift.io/v1alpha1 + kind: ConsolePlugin + spec: + service: + name: foo + namespace: foo + port: 80 + basePath: / diff --git a/vendor/github.com/openshift/api/console/v1alpha1/types.go b/vendor/github.com/openshift/api/console/v1alpha1/types.go new file mode 100644 index 000000000..1c267880d --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1alpha1/types.go @@ -0,0 
+1 @@ +package v1alpha1 diff --git a/vendor/github.com/openshift/api/console/v1alpha1/types_console_plugin.go b/vendor/github.com/openshift/api/console/v1alpha1/types_console_plugin.go new file mode 100644 index 000000000..835e20010 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1alpha1/types_console_plugin.go @@ -0,0 +1,174 @@ +package v1alpha1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +openshift:compatibility-gen:level=4 + +// ConsolePlugin is an extension for customizing OpenShift web console by +// dynamically loading code from another service running on the cluster. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +type ConsolePlugin struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + // +kubebuilder:validation:Required + // +required + Spec ConsolePluginSpec `json:"spec"` +} + +// ConsolePluginSpec is the desired plugin configuration. +type ConsolePluginSpec struct { + // displayName is the display name of the plugin. + // +kubebuilder:validation:MinLength=1 + // +optional + DisplayName string `json:"displayName,omitempty"` + // service is a Kubernetes Service that exposes the plugin using a + // deployment with an HTTP server. The Service must use HTTPS and + // Service serving certificate. The console backend will proxy the + // plugin's assets from the Service using the service CA bundle. + // +kubebuilder:validation:Required + // +required + Service ConsolePluginService `json:"service"` + // proxy is a list of proxies that describe the various service types + // to which the plugin needs to connect. + // +kubebuilder:validation:Optional + // +optional + Proxy []ConsolePluginProxy `json:"proxy,omitempty"` +} + +// ConsolePluginProxy holds information on various service types +// to which console's backend will proxy the plugin's requests. +type ConsolePluginProxy struct { + // type is the type of the console plugin's proxy. Currently only "Service" is supported. + // +kubebuilder:validation:Required + // +required + Type ConsolePluginProxyType `json:"type"` + // alias is a proxy name that identifies the plugin's proxy. An alias name + // should be unique per plugin. The console backend exposes the following + // proxy endpoint: + // + // /api/proxy/plugin/<plugin-name>/<proxy-alias>/<request-path>?<optional-query-arguments> + // + // Request example path: + // + // /api/proxy/plugin/acm/search/pods?namespace=openshift-apiserver + // + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=128 + // +kubebuilder:validation:Pattern=`^[A-Za-z0-9-_]+$` + // +required + Alias string `json:"alias"` + // service is an in-cluster Service that the plugin will connect to. + // The Service must use HTTPS. The console backend exposes an endpoint + // in order to proxy communication between the plugin and the Service. + // Note: service field is required for now, since currently only "Service" + // type is supported.
+ // +kubebuilder:validation:Required + // +required + Service ConsolePluginProxyServiceConfig `json:"service,omitempty"` + // caCertificate provides the cert authority certificate contents, + // in case the proxied Service is using custom service CA. + // By default, the service CA bundle provided by the service-ca operator is used. + // +kubebuilder:validation:Pattern=`^-----BEGIN CERTIFICATE-----([\s\S]*)-----END CERTIFICATE-----\s?$` + // +kubebuilder:validation:Optional + // +optional + CACertificate string `json:"caCertificate,omitempty"` + // authorize indicates if the proxied request should contain the logged-in user's + // OpenShift access token in the "Authorization" request header. For example: + // + // Authorization: Bearer sha256~kV46hPnEYhCWFnB85r5NrprAxggzgb6GOeLbgcKNsH0 + // + // By default the access token is not part of the proxied request. + // +kubebuilder:default:=false + // +kubebuilder:validation:Optional + // +optional + Authorize bool `json:"authorize,omitempty"` +} + +// ConsolePluginProxyType is an enumeration of available proxy types +// +kubebuilder:validation:Pattern=`^(Service)$` +type ConsolePluginProxyType string + +const ( + // ProxyTypeService is used when proxying communication to a Service + ProxyTypeService ConsolePluginProxyType = "Service" +) + +// ConsolePluginProxyServiceConfig holds information on the Service to which +// console's backend will proxy the plugin's requests. +type ConsolePluginProxyServiceConfig struct { + // name of Service that the plugin needs to connect to. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=128 + // +required + Name string `json:"name"` + // namespace of Service that the plugin needs to connect to. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=128 + // +required + Namespace string `json:"namespace"` + // port on which the Service that the plugin needs to connect to + // is listening on. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Maximum:=65535 + // +kubebuilder:validation:Minimum:=1 + // +required + Port int32 `json:"port"` +} + +// ConsolePluginService holds information on Service that is serving +// console dynamic plugin assets. +type ConsolePluginService struct { + // name of Service that is serving the plugin assets. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=128 + // +required + Name string `json:"name"` + // namespace of Service that is serving the plugin assets. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=128 + // +required + Namespace string `json:"namespace"` + // port on which the Service that is serving the plugin is listening to. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Maximum:=65535 + // +kubebuilder:validation:Minimum:=1 + // +required + Port int32 `json:"port"` + // basePath is the path to the plugin's assets. The primary asset is the + // manifest file called `plugin-manifest.json`, which is a JSON document + // that contains metadata about the plugin and the extensions.
+ // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:Pattern=`^/` + // +kubebuilder:default:="/" + // +required + BasePath string `json:"basePath"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +openshift:compatibility-gen:level=4 + +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +type ConsolePluginList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []ConsolePlugin `json:"items"` +} diff --git a/vendor/github.com/openshift/api/console/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/console/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..87b68c6b1 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,141 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsolePlugin) DeepCopyInto(out *ConsolePlugin) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsolePlugin. +func (in *ConsolePlugin) DeepCopy() *ConsolePlugin { + if in == nil { + return nil + } + out := new(ConsolePlugin) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsolePlugin) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsolePluginList) DeepCopyInto(out *ConsolePluginList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConsolePlugin, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsolePluginList. +func (in *ConsolePluginList) DeepCopy() *ConsolePluginList { + if in == nil { + return nil + } + out := new(ConsolePluginList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsolePluginList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsolePluginProxy) DeepCopyInto(out *ConsolePluginProxy) { + *out = *in + out.Service = in.Service + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsolePluginProxy. 
+func (in *ConsolePluginProxy) DeepCopy() *ConsolePluginProxy { + if in == nil { + return nil + } + out := new(ConsolePluginProxy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsolePluginProxyServiceConfig) DeepCopyInto(out *ConsolePluginProxyServiceConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsolePluginProxyServiceConfig. +func (in *ConsolePluginProxyServiceConfig) DeepCopy() *ConsolePluginProxyServiceConfig { + if in == nil { + return nil + } + out := new(ConsolePluginProxyServiceConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsolePluginService) DeepCopyInto(out *ConsolePluginService) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsolePluginService. +func (in *ConsolePluginService) DeepCopy() *ConsolePluginService { + if in == nil { + return nil + } + out := new(ConsolePluginService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsolePluginSpec) DeepCopyInto(out *ConsolePluginSpec) { + *out = *in + out.Service = in.Service + if in.Proxy != nil { + in, out := &in.Proxy, &out.Proxy + *out = make([]ConsolePluginProxy, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsolePluginSpec. +func (in *ConsolePluginSpec) DeepCopy() *ConsolePluginSpec { + if in == nil { + return nil + } + out := new(ConsolePluginSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/console/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/console/v1alpha1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..13b5646e7 --- /dev/null +++ b/vendor/github.com/openshift/api/console/v1alpha1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,79 @@ +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_ConsolePlugin = map[string]string{ + "": "ConsolePlugin is an extension for customizing OpenShift web console by dynamically loading code from another service running on the cluster.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ConsolePlugin) SwaggerDoc() map[string]string { + return map_ConsolePlugin +} + +var map_ConsolePluginList = map[string]string{ + "": "Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ConsolePluginList) SwaggerDoc() map[string]string { + return map_ConsolePluginList +} + +var map_ConsolePluginProxy = map[string]string{ + "": "ConsolePluginProxy holds information on various service types to which console's backend will proxy the plugin's requests.", + "type": "type is the type of the console plugin's proxy. Currently only \"Service\" is supported.", + "alias": "alias is a proxy name that identifies the plugin's proxy. An alias name should be unique per plugin. The console backend exposes the following proxy endpoint:\n\n/api/proxy/plugin/<plugin-name>/<proxy-alias>/<request-path>?<optional-query-arguments>\n\nRequest example path:\n\n/api/proxy/plugin/acm/search/pods?namespace=openshift-apiserver", + "service": "service is an in-cluster Service that the plugin will connect to. The Service must use HTTPS. The console backend exposes an endpoint in order to proxy communication between the plugin and the Service. Note: service field is required for now, since currently only \"Service\" type is supported.", + "caCertificate": "caCertificate provides the cert authority certificate contents, in case the proxied Service is using custom service CA. By default, the service CA bundle provided by the service-ca operator is used.", + "authorize": "authorize indicates if the proxied request should contain the logged-in user's OpenShift access token in the \"Authorization\" request header. For example:\n\nAuthorization: Bearer sha256~kV46hPnEYhCWFnB85r5NrprAxggzgb6GOeLbgcKNsH0\n\nBy default the access token is not part of the proxied request.", +} + +func (ConsolePluginProxy) SwaggerDoc() map[string]string { + return map_ConsolePluginProxy +} + +var map_ConsolePluginProxyServiceConfig = map[string]string{ + "": "ConsolePluginProxyServiceConfig holds information on the Service to which console's backend will proxy the plugin's requests.", + "name": "name of Service that the plugin needs to connect to.", + "namespace": "namespace of Service that the plugin needs to connect to.", + "port": "port on which the Service that the plugin needs to connect to is listening on.", +} + +func (ConsolePluginProxyServiceConfig) SwaggerDoc() map[string]string { + return map_ConsolePluginProxyServiceConfig +} + +var map_ConsolePluginService = map[string]string{ + "": "ConsolePluginService holds information on Service that is serving console dynamic plugin assets.", + "name": "name of Service that is serving the plugin assets.", + "namespace": "namespace of Service that is serving the plugin assets.", + "port": "port on which the Service that is serving the plugin is listening to.", + "basePath": "basePath is the path to the plugin's assets.
The primary asset is the manifest file called `plugin-manifest.json`, which is a JSON document that contains metadata about the plugin and the extensions.", +} + +func (ConsolePluginService) SwaggerDoc() map[string]string { + return map_ConsolePluginService +} + +var map_ConsolePluginSpec = map[string]string{ + "": "ConsolePluginSpec is the desired plugin configuration.", + "displayName": "displayName is the display name of the plugin.", + "service": "service is a Kubernetes Service that exposes the plugin using a deployment with an HTTP server. The Service must use HTTPS and Service serving certificate. The console backend will proxy the plugin's assets from the Service using the service CA bundle.", + "proxy": "proxy is a list of proxies that describe the various service types to which the plugin needs to connect.", +} + +func (ConsolePluginSpec) SwaggerDoc() map[string]string { + return map_ConsolePluginSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/helm/.codegen.yaml b/vendor/github.com/openshift/api/helm/.codegen.yaml new file mode 100644 index 000000000..ffa2c8d9b --- /dev/null +++ b/vendor/github.com/openshift/api/helm/.codegen.yaml @@ -0,0 +1,2 @@ +swaggerdocs: + commentPolicy: Warn diff --git a/vendor/github.com/openshift/api/helm/install.go b/vendor/github.com/openshift/api/helm/install.go new file mode 100644 index 000000000..6c8f51892 --- /dev/null +++ b/vendor/github.com/openshift/api/helm/install.go @@ -0,0 +1,26 @@ +package helm + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + helmv1beta1 "github.com/openshift/api/helm/v1beta1" +) + +const ( + GroupName = "helm.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(helmv1beta1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/helm/v1beta1/0000_10-helm-chart-repository.crd.yaml b/vendor/github.com/openshift/api/helm/v1beta1/0000_10-helm-chart-repository.crd.yaml new file mode 100644 index 000000000..4ae9a66b1 --- /dev/null +++ b/vendor/github.com/openshift/api/helm/v1beta1/0000_10-helm-chart-repository.crd.yaml @@ -0,0 +1,130 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/598 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: helmchartrepositories.helm.openshift.io +spec: + group: helm.openshift.io + names: + kind: HelmChartRepository + listKind: HelmChartRepositoryList + plural: helmchartrepositories + singular: helmchartrepository + scope: Cluster + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: "HelmChartRepository holds cluster-wide configuration for proxied Helm chart repository \n Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + connectionConfig: + description: Required configuration for connecting to the chart repo + type: object + properties: + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca-bundle.crt" is used to locate the data. If empty, the default system roots are used. The namespace for this config map is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + tlsClientConfig: + description: tlsClientConfig is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate and private key to present when connecting to the server. The key "tls.crt" is used to locate the client certificate. The key "tls.key" is used to locate the private key. The namespace for this secret is openshift-config. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + url: + description: Chart repository URL + type: string + maxLength: 2048 + pattern: ^https?:\/\/ + description: + description: Optional human readable repository description, it can be used by UI for displaying purposes + type: string + maxLength: 2048 + minLength: 1 + disabled: + description: If set to true, disable the repo usage in the cluster/namespace + type: boolean + name: + description: Optional associated human readable repository name, it can be used by UI for displaying purposes + type: string + maxLength: 100 + minLength: 1 + status: + description: Observed status of the repository within the cluster.. + type: object + properties: + conditions: + description: conditions is a list of conditions and their statuses + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. 
If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/helm/v1beta1/0000_10-project-helm-chart-repository.crd.yaml b/vendor/github.com/openshift/api/helm/v1beta1/0000_10-project-helm-chart-repository.crd.yaml new file mode 100644 index 000000000..e3c0dd9eb --- /dev/null +++ b/vendor/github.com/openshift/api/helm/v1beta1/0000_10-project-helm-chart-repository.crd.yaml @@ -0,0 +1,139 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1084 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: projecthelmchartrepositories.helm.openshift.io +spec: + group: helm.openshift.io + names: + kind: ProjectHelmChartRepository + listKind: ProjectHelmChartRepositoryList + plural: projecthelmchartrepositories + singular: projecthelmchartrepository + scope: Namespaced + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: "ProjectHelmChartRepository holds namespace-wide configuration for proxied Helm chart repository \n Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + connectionConfig: + description: Required configuration for connecting to the chart repo + type: object + properties: + basicAuthConfig: + description: basicAuthConfig is an optional reference to a secret by name that contains the basic authentication credentials to present when connecting to the server. The key "username" is used to locate the username. The key "password" is used to locate the password. The namespace for this secret must be the same as the namespace where the project helm chart repository is getting instantiated. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + ca: + description: ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key "ca-bundle.crt" is used to locate the data. If empty, the default system roots are used. The namespace for this configmap must be the same as the namespace where the project helm chart repository is getting instantiated. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced config map + type: string + tlsClientConfig: + description: tlsClientConfig is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate and private key to present when connecting to the server. The key "tls.crt" is used to locate the client certificate. The key "tls.key" is used to locate the private key. The namespace for this secret must be the same as the namespace where the project helm chart repository is getting instantiated. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + url: + description: Chart repository URL + type: string + maxLength: 2048 + pattern: ^https?:\/\/ + description: + description: Optional human readable repository description, it can be used by UI for displaying purposes + type: string + maxLength: 2048 + minLength: 1 + disabled: + description: If set to true, disable the repo usage in the namespace + type: boolean + name: + description: Optional associated human readable repository name, it can be used by UI for displaying purposes + type: string + maxLength: 100 + minLength: 1 + status: + description: Observed status of the repository within the namespace. + type: object + properties: + conditions: + description: conditions is a list of conditions and their statuses + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state.
// Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/helm/v1beta1/Makefile b/vendor/github.com/openshift/api/helm/v1beta1/Makefile new file mode 100644 index 000000000..d61590833 --- /dev/null +++ b/vendor/github.com/openshift/api/helm/v1beta1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test
test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="helm.openshift.io/v1beta1" diff --git a/vendor/github.com/openshift/api/helm/v1beta1/doc.go b/vendor/github.com/openshift/api/helm/v1beta1/doc.go new file mode 100644 index 000000000..8a45cd1c8 --- /dev/null +++ b/vendor/github.com/openshift/api/helm/v1beta1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +kubebuilder:validation:Optional +// +groupName=helm.openshift.io +// Package v1beta1 is the v1beta1 version of the API.
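Taken together, the two helm CRDs above accept resources like the following minimal sketches (all names and URLs are invented for illustration and are not part of this patch). The first is the cluster-scoped repository; the second is the namespaced variant, which additionally supports basicAuthConfig:

    apiVersion: helm.openshift.io/v1beta1
    kind: HelmChartRepository
    metadata:
      name: example-charts                  # cluster-scoped, so no namespace
    spec:
      name: Example Charts                  # optional display name (max 100 chars)
      connectionConfig:
        url: https://charts.example.com     # must match ^https?:\/\/
        ca:
          name: example-ca-bundle           # ConfigMap in openshift-config, key ca-bundle.crt

    apiVersion: helm.openshift.io/v1beta1
    kind: ProjectHelmChartRepository
    metadata:
      name: team-charts
      namespace: dev-team                   # namespaced variant
    spec:
      connectionConfig:
        url: https://charts.example.com
        basicAuthConfig:
          name: team-charts-auth            # Secret in dev-team with keys "username" and "password"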
+package v1beta1 diff --git a/vendor/github.com/openshift/api/helm/v1beta1/register.go b/vendor/github.com/openshift/api/helm/v1beta1/register.go new file mode 100644 index 000000000..1301eb008 --- /dev/null +++ b/vendor/github.com/openshift/api/helm/v1beta1/register.go @@ -0,0 +1,40 @@ +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "helm.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &HelmChartRepository{}, + &HelmChartRepositoryList{}, + &ProjectHelmChartRepository{}, + &ProjectHelmChartRepositoryList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/helm/v1beta1/stable.helmchartrepository.testsuite.yaml b/vendor/github.com/openshift/api/helm/v1beta1/stable.helmchartrepository.testsuite.yaml new file mode 100644 index 000000000..bac1227e2 --- /dev/null +++ b/vendor/github.com/openshift/api/helm/v1beta1/stable.helmchartrepository.testsuite.yaml @@ -0,0 +1,14 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] HelmChartRepository" +crd: 0000_10-helm-chart-repository.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal HelmChartRepository + initial: | + apiVersion: helm.openshift.io/v1beta1 + kind: HelmChartRepository + spec: {} # No spec is required for a HelmChartRepository + expected: | + apiVersion: helm.openshift.io/v1beta1 + kind: HelmChartRepository + spec: {} diff --git a/vendor/github.com/openshift/api/helm/v1beta1/stable.projecthelmchartrepository.testsuite.yaml b/vendor/github.com/openshift/api/helm/v1beta1/stable.projecthelmchartrepository.testsuite.yaml new file mode 100644 index 000000000..e11f9d349 --- /dev/null +++ b/vendor/github.com/openshift/api/helm/v1beta1/stable.projecthelmchartrepository.testsuite.yaml @@ -0,0 +1,14 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] ProjectHelmChartRepository" +crd: 0000_10-project-helm-chart-repository.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal ProjectHelmChartRepository + initial: | + apiVersion: helm.openshift.io/v1beta1 + kind: ProjectHelmChartRepository + spec: {} # No spec is required for a ProjectHelmChartRepository + expected: | + apiVersion: helm.openshift.io/v1beta1 + kind: ProjectHelmChartRepository + spec: {} diff --git a/vendor/github.com/openshift/api/helm/v1beta1/types_helm_chart_repository.go b/vendor/github.com/openshift/api/helm/v1beta1/types_helm_chart_repository.go new file mode 100644 index 000000000..826c1496f --- 
/dev/null +++ b/vendor/github.com/openshift/api/helm/v1beta1/types_helm_chart_repository.go @@ -0,0 +1,99 @@ +package v1beta1 + +import ( + configv1 "github.com/openshift/api/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:plural=helmchartrepositories + +// HelmChartRepository holds cluster-wide configuration for proxied Helm chart repository +// +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type HelmChartRepository struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec HelmChartRepositorySpec `json:"spec"` + + // Observed status of the repository within the cluster.. + // +optional + Status HelmChartRepositoryStatus `json:"status"` +} + +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +openshift:compatibility-gen:level=2 +type HelmChartRepositoryList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []HelmChartRepository `json:"items"` +} + +// Helm chart repository exposed within the cluster +type HelmChartRepositorySpec struct { + + // If set to true, disable the repo usage in the cluster/namespace + // +optional + Disabled bool `json:"disabled,omitempty"` + + // Optional associated human readable repository name, it can be used by UI for displaying purposes + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=100 + // +optional + DisplayName string `json:"name,omitempty"` + + // Optional human readable repository description, it can be used by UI for displaying purposes + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=2048 + // +optional + Description string `json:"description,omitempty"` + + // Required configuration for connecting to the chart repo + ConnectionConfig ConnectionConfig `json:"connectionConfig"` +} + +type ConnectionConfig struct { + + // Chart repository URL + // +kubebuilder:validation:Pattern=`^https?:\/\/` + // +kubebuilder:validation:MaxLength=2048 + URL string `json:"url"` + + // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. + // It is used as a trust anchor to validate the TLS certificate presented by the remote server. + // The key "ca-bundle.crt" is used to locate the data. + // If empty, the default system roots are used. + // The namespace for this config map is openshift-config. + // +optional + CA configv1.ConfigMapNameReference `json:"ca,omitempty"` + + // tlsClientConfig is an optional reference to a secret by name that contains the + // PEM-encoded TLS client certificate and private key to present when connecting to the server. + // The key "tls.crt" is used to locate the client certificate. 
+ // The key "tls.key" is used to locate the private key. + // The namespace for this secret is openshift-config. + // +optional + TLSClientConfig configv1.SecretNameReference `json:"tlsClientConfig,omitempty"` +} + +type HelmChartRepositoryStatus struct { + + // conditions is a list of conditions and their statuses + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} diff --git a/vendor/github.com/openshift/api/helm/v1beta1/types_project_helm_chart_repository.go b/vendor/github.com/openshift/api/helm/v1beta1/types_project_helm_chart_repository.go new file mode 100644 index 000000000..c4cd7759a --- /dev/null +++ b/vendor/github.com/openshift/api/helm/v1beta1/types_project_helm_chart_repository.go @@ -0,0 +1,99 @@ +package v1beta1 + +import ( + configv1 "github.com/openshift/api/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:plural=projecthelmchartrepositories + +// ProjectHelmChartRepository holds namespace-wide configuration for proxied Helm chart repository +// +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type ProjectHelmChartRepository struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec ProjectHelmChartRepositorySpec `json:"spec"` + + // Observed status of the repository within the namespace.. + // +optional + Status HelmChartRepositoryStatus `json:"status"` +} + +// Project Helm chart repository exposed within a namespace +type ProjectHelmChartRepositorySpec struct { + + // If set to true, disable the repo usage in the namespace + // +optional + Disabled bool `json:"disabled,omitempty"` + + // Optional associated human readable repository name, it can be used by UI for displaying purposes + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=100 + // +optional + DisplayName string `json:"name,omitempty"` + + // Optional human readable repository description, it can be used by UI for displaying purposes + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=2048 + // +optional + Description string `json:"description,omitempty"` + + // Required configuration for connecting to the chart repo + ProjectConnectionConfig ConnectionConfigNamespaceScoped `json:"connectionConfig"` +} + +type ConnectionConfigNamespaceScoped struct { + + // Chart repository URL + // +kubebuilder:validation:Pattern=`^https?:\/\/` + // +kubebuilder:validation:MaxLength=2048 + URL string `json:"url"` + + // ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. + // It is used as a trust anchor to validate the TLS certificate presented by the remote server. + // The key "ca-bundle.crt" is used to locate the data. + // If empty, the default system roots are used. + // The namespace for this configmap must be same as the namespace where the project helm chart repository is getting instantiated. 
+ // +optional + CA configv1.ConfigMapNameReference `json:"ca,omitempty"` + + // tlsClientConfig is an optional reference to a secret by name that contains the + // PEM-encoded TLS client certificate and private key to present when connecting to the server. + // The key "tls.crt" is used to locate the client certificate. + // The key "tls.key" is used to locate the private key. + // The namespace for this secret must be same as the namespace where the project helm chart repository is getting instantiated. + // +optional + TLSClientConfig configv1.SecretNameReference `json:"tlsClientConfig,omitempty"` + + // basicAuthConfig is an optional reference to a secret by name that contains + // the basic authentication credentials to present when connecting to the server. + // The key "username" is used locate the username. + // The key "password" is used to locate the password. + // The namespace for this secret must be same as the namespace where the project helm chart repository is getting instantiated. + // +optional + BasicAuthConfig configv1.SecretNameReference `json:"basicAuthConfig,omitempty"` +} + +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +openshift:compatibility-gen:level=2 +type ProjectHelmChartRepositoryList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + Items []ProjectHelmChartRepository `json:"items"` +} diff --git a/vendor/github.com/openshift/api/helm/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/helm/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..da33cc3ef --- /dev/null +++ b/vendor/github.com/openshift/api/helm/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,227 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionConfig) DeepCopyInto(out *ConnectionConfig) { + *out = *in + out.CA = in.CA + out.TLSClientConfig = in.TLSClientConfig + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionConfig. +func (in *ConnectionConfig) DeepCopy() *ConnectionConfig { + if in == nil { + return nil + } + out := new(ConnectionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionConfigNamespaceScoped) DeepCopyInto(out *ConnectionConfigNamespaceScoped) { + *out = *in + out.CA = in.CA + out.TLSClientConfig = in.TLSClientConfig + out.BasicAuthConfig = in.BasicAuthConfig + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionConfigNamespaceScoped. +func (in *ConnectionConfigNamespaceScoped) DeepCopy() *ConnectionConfigNamespaceScoped { + if in == nil { + return nil + } + out := new(ConnectionConfigNamespaceScoped) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *HelmChartRepository) DeepCopyInto(out *HelmChartRepository) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartRepository. +func (in *HelmChartRepository) DeepCopy() *HelmChartRepository { + if in == nil { + return nil + } + out := new(HelmChartRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmChartRepository) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmChartRepositoryList) DeepCopyInto(out *HelmChartRepositoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HelmChartRepository, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartRepositoryList. +func (in *HelmChartRepositoryList) DeepCopy() *HelmChartRepositoryList { + if in == nil { + return nil + } + out := new(HelmChartRepositoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmChartRepositoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmChartRepositorySpec) DeepCopyInto(out *HelmChartRepositorySpec) { + *out = *in + out.ConnectionConfig = in.ConnectionConfig + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartRepositorySpec. +func (in *HelmChartRepositorySpec) DeepCopy() *HelmChartRepositorySpec { + if in == nil { + return nil + } + out := new(HelmChartRepositorySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmChartRepositoryStatus) DeepCopyInto(out *HelmChartRepositoryStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmChartRepositoryStatus. +func (in *HelmChartRepositoryStatus) DeepCopy() *HelmChartRepositoryStatus { + if in == nil { + return nil + } + out := new(HelmChartRepositoryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
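+//
+// Why callers use these functions (illustrative sketch of hypothetical
+// controller code, not part of this package): objects handed out by shared
+// informer caches must never be mutated in place, so callers copy first:
+//
+//	repo := cached.DeepCopy()
+//	repo.Spec.Disabled = true
+//	_, err := client.Update(ctx, repo)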
+func (in *ProjectHelmChartRepository) DeepCopyInto(out *ProjectHelmChartRepository) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectHelmChartRepository. +func (in *ProjectHelmChartRepository) DeepCopy() *ProjectHelmChartRepository { + if in == nil { + return nil + } + out := new(ProjectHelmChartRepository) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectHelmChartRepository) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectHelmChartRepositoryList) DeepCopyInto(out *ProjectHelmChartRepositoryList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProjectHelmChartRepository, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectHelmChartRepositoryList. +func (in *ProjectHelmChartRepositoryList) DeepCopy() *ProjectHelmChartRepositoryList { + if in == nil { + return nil + } + out := new(ProjectHelmChartRepositoryList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectHelmChartRepositoryList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectHelmChartRepositorySpec) DeepCopyInto(out *ProjectHelmChartRepositorySpec) { + *out = *in + out.ProjectConnectionConfig = in.ProjectConnectionConfig + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectHelmChartRepositorySpec. +func (in *ProjectHelmChartRepositorySpec) DeepCopy() *ProjectHelmChartRepositorySpec { + if in == nil { + return nil + } + out := new(ProjectHelmChartRepositorySpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/helm/v1beta1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/helm/v1beta1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..42d986f23 --- /dev/null +++ b/vendor/github.com/openshift/api/helm/v1beta1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,107 @@ +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
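+//
+// For illustration (hypothetical caller, not part of the generated file):
+// OpenAPI tooling looks these maps up per field name, e.g.
+//
+//	doc := (HelmChartRepository{}).SwaggerDoc()["spec"]
+//	// doc == "spec holds user settable values for configuration"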
+// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_ConnectionConfig = map[string]string{ + "url": "Chart repository URL", + "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca-bundle.crt\" is used to locate the data. If empty, the default system roots are used. The namespace for this config map is openshift-config.", + "tlsClientConfig": "tlsClientConfig is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate and private key to present when connecting to the server. The key \"tls.crt\" is used to locate the client certificate. The key \"tls.key\" is used to locate the private key. The namespace for this secret is openshift-config.", +} + +func (ConnectionConfig) SwaggerDoc() map[string]string { + return map_ConnectionConfig +} + +var map_HelmChartRepository = map[string]string{ + "": "HelmChartRepository holds cluster-wide configuration for proxied Helm chart repository\n\nCompatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "Observed status of the repository within the cluster..", +} + +func (HelmChartRepository) SwaggerDoc() map[string]string { + return map_HelmChartRepository +} + +var map_HelmChartRepositoryList = map[string]string{ + "": "Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (HelmChartRepositoryList) SwaggerDoc() map[string]string { + return map_HelmChartRepositoryList +} + +var map_HelmChartRepositorySpec = map[string]string{ + "": "Helm chart repository exposed within the cluster", + "disabled": "If set to true, disable the repo usage in the cluster/namespace", + "name": "Optional associated human readable repository name, it can be used by UI for displaying purposes", + "description": "Optional human readable repository description, it can be used by UI for displaying purposes", + "connectionConfig": "Required configuration for connecting to the chart repo", +} + +func (HelmChartRepositorySpec) SwaggerDoc() map[string]string { + return map_HelmChartRepositorySpec +} + +var map_HelmChartRepositoryStatus = map[string]string{ + "conditions": "conditions is a list of conditions and their statuses", +} + +func (HelmChartRepositoryStatus) SwaggerDoc() map[string]string { + return map_HelmChartRepositoryStatus +} + +var map_ConnectionConfigNamespaceScoped = map[string]string{ + "url": "Chart repository URL", + "ca": "ca is an optional reference to a config map by name containing the PEM-encoded CA bundle. It is used as a trust anchor to validate the TLS certificate presented by the remote server. The key \"ca-bundle.crt\" is used to locate the data. If empty, the default system roots are used. 
The namespace for this configmap must be same as the namespace where the project helm chart repository is getting instantiated.", + "tlsClientConfig": "tlsClientConfig is an optional reference to a secret by name that contains the PEM-encoded TLS client certificate and private key to present when connecting to the server. The key \"tls.crt\" is used to locate the client certificate. The key \"tls.key\" is used to locate the private key. The namespace for this secret must be same as the namespace where the project helm chart repository is getting instantiated.", + "basicAuthConfig": "basicAuthConfig is an optional reference to a secret by name that contains the basic authentication credentials to present when connecting to the server. The key \"username\" is used locate the username. The key \"password\" is used to locate the password. The namespace for this secret must be same as the namespace where the project helm chart repository is getting instantiated.", +} + +func (ConnectionConfigNamespaceScoped) SwaggerDoc() map[string]string { + return map_ConnectionConfigNamespaceScoped +} + +var map_ProjectHelmChartRepository = map[string]string{ + "": "ProjectHelmChartRepository holds namespace-wide configuration for proxied Helm chart repository\n\nCompatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "Observed status of the repository within the namespace..", +} + +func (ProjectHelmChartRepository) SwaggerDoc() map[string]string { + return map_ProjectHelmChartRepository +} + +var map_ProjectHelmChartRepositoryList = map[string]string{ + "": "Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ProjectHelmChartRepositoryList) SwaggerDoc() map[string]string { + return map_ProjectHelmChartRepositoryList +} + +var map_ProjectHelmChartRepositorySpec = map[string]string{ + "": "Project Helm chart repository exposed within a namespace", + "disabled": "If set to true, disable the repo usage in the namespace", + "name": "Optional associated human readable repository name, it can be used by UI for displaying purposes", + "description": "Optional human readable repository description, it can be used by UI for displaying purposes", + "connectionConfig": "Required configuration for connecting to the chart repo", +} + +func (ProjectHelmChartRepositorySpec) SwaggerDoc() map[string]string { + return map_ProjectHelmChartRepositorySpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/image/.codegen.yaml b/vendor/github.com/openshift/api/image/.codegen.yaml new file mode 100644 index 000000000..ffa2c8d9b --- /dev/null +++ b/vendor/github.com/openshift/api/image/.codegen.yaml @@ -0,0 +1,2 @@ +swaggerdocs: + commentPolicy: Warn diff --git a/vendor/github.com/openshift/api/image/OWNERS b/vendor/github.com/openshift/api/image/OWNERS new file mode 100644 index 000000000..c12602811 --- /dev/null +++ b/vendor/github.com/openshift/api/image/OWNERS @@ -0,0 +1,5 @@ +reviewers: + - bparees + - dmage + - legionus + - miminar diff --git a/vendor/github.com/openshift/api/image/docker10/doc.go b/vendor/github.com/openshift/api/image/docker10/doc.go new file mode 100644 index 000000000..cc194d24d --- /dev/null +++ b/vendor/github.com/openshift/api/image/docker10/doc.go @@ -0,0 +1,4 @@ +// +k8s:deepcopy-gen=package,register + +// Package docker10 is the docker10 version of the API. +package docker10 diff --git a/vendor/github.com/openshift/api/image/docker10/register.go b/vendor/github.com/openshift/api/image/docker10/register.go new file mode 100644 index 000000000..3d5ad268a --- /dev/null +++ b/vendor/github.com/openshift/api/image/docker10/register.go @@ -0,0 +1,47 @@ +package docker10 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + GroupName = "image.openshift.io" + LegacyGroupName = "" +) + +// SchemeGroupVersion is group version used to register these objects +var ( + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "1.0"} + LegacySchemeGroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: "1.0"} + + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + LegacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes) + + AddToSchemeInCoreGroup = LegacySchemeBuilder.AddToScheme + + // Install is a function which adds this version to a scheme + Install = SchemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. 
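+//
+// For illustration (hypothetical consumer code, not part of this package):
+// callers normally wire the group in through Install rather than calling
+// this directly, e.g.
+//
+//	scheme := runtime.NewScheme()
+//	utilruntime.Must(docker10.Install(scheme))
+//
+// where utilruntime is k8s.io/apimachinery/pkg/util/runtime.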
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &DockerImage{}, + ) + return nil +} + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(LegacySchemeGroupVersion, + &DockerImage{}, + ) + return nil +} diff --git a/vendor/github.com/openshift/api/image/docker10/types_docker.go b/vendor/github.com/openshift/api/image/docker10/types_docker.go new file mode 100644 index 000000000..03f0f67fc --- /dev/null +++ b/vendor/github.com/openshift/api/image/docker10/types_docker.go @@ -0,0 +1,60 @@ +package docker10 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DockerImage is the type representing a container image and its various properties when +// retrieved from the Docker client API. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type DockerImage struct { + metav1.TypeMeta `json:",inline"` + + ID string `json:"Id"` + Parent string `json:"Parent,omitempty"` + Comment string `json:"Comment,omitempty"` + Created metav1.Time `json:"Created,omitempty"` + Container string `json:"Container,omitempty"` + ContainerConfig DockerConfig `json:"ContainerConfig,omitempty"` + DockerVersion string `json:"DockerVersion,omitempty"` + Author string `json:"Author,omitempty"` + Config *DockerConfig `json:"Config,omitempty"` + Architecture string `json:"Architecture,omitempty"` + Size int64 `json:"Size,omitempty"` +} + +// DockerConfig is the list of configuration options used when creating a container. 
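+//
+// The JSON tags below deliberately keep Docker's historical wire casing
+// (for example, CPUShares marshals as "CpuShares"), so Docker API payloads
+// decode directly with encoding/json (illustrative sketch, hypothetical
+// data):
+//
+//	var cfg DockerConfig
+//	err := json.Unmarshal([]byte(`{"CpuShares": 512, "Cmd": ["/bin/sh"]}`), &cfg)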
+type DockerConfig struct { + Hostname string `json:"Hostname,omitempty"` + Domainname string `json:"Domainname,omitempty"` + User string `json:"User,omitempty"` + Memory int64 `json:"Memory,omitempty"` + MemorySwap int64 `json:"MemorySwap,omitempty"` + CPUShares int64 `json:"CpuShares,omitempty"` + CPUSet string `json:"Cpuset,omitempty"` + AttachStdin bool `json:"AttachStdin,omitempty"` + AttachStdout bool `json:"AttachStdout,omitempty"` + AttachStderr bool `json:"AttachStderr,omitempty"` + PortSpecs []string `json:"PortSpecs,omitempty"` + ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` + Tty bool `json:"Tty,omitempty"` + OpenStdin bool `json:"OpenStdin,omitempty"` + StdinOnce bool `json:"StdinOnce,omitempty"` + Env []string `json:"Env,omitempty"` + Cmd []string `json:"Cmd,omitempty"` + DNS []string `json:"Dns,omitempty"` // For Docker API v1.9 and below only + Image string `json:"Image,omitempty"` + Volumes map[string]struct{} `json:"Volumes,omitempty"` + VolumesFrom string `json:"VolumesFrom,omitempty"` + WorkingDir string `json:"WorkingDir,omitempty"` + Entrypoint []string `json:"Entrypoint,omitempty"` + NetworkDisabled bool `json:"NetworkDisabled,omitempty"` + SecurityOpts []string `json:"SecurityOpts,omitempty"` + OnBuild []string `json:"OnBuild,omitempty"` + Labels map[string]string `json:"Labels,omitempty"` +} diff --git a/vendor/github.com/openshift/api/image/docker10/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/image/docker10/zz_generated.deepcopy.go new file mode 100644 index 000000000..2ce8330b2 --- /dev/null +++ b/vendor/github.com/openshift/api/image/docker10/zz_generated.deepcopy.go @@ -0,0 +1,114 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package docker10 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerConfig) DeepCopyInto(out *DockerConfig) { + *out = *in + if in.PortSpecs != nil { + in, out := &in.PortSpecs, &out.PortSpecs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExposedPorts != nil { + in, out := &in.ExposedPorts, &out.ExposedPorts + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Cmd != nil { + in, out := &in.Cmd, &out.Cmd + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Entrypoint != nil { + in, out := &in.Entrypoint, &out.Entrypoint + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecurityOpts != nil { + in, out := &in.SecurityOpts, &out.SecurityOpts + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OnBuild != nil { + in, out := &in.OnBuild, &out.OnBuild + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerConfig. 
+func (in *DockerConfig) DeepCopy() *DockerConfig { + if in == nil { + return nil + } + out := new(DockerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerImage) DeepCopyInto(out *DockerImage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Created.DeepCopyInto(&out.Created) + in.ContainerConfig.DeepCopyInto(&out.ContainerConfig) + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(DockerConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerImage. +func (in *DockerImage) DeepCopy() *DockerImage { + if in == nil { + return nil + } + out := new(DockerImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DockerImage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/github.com/openshift/api/image/docker10/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/image/docker10/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..e818f784a --- /dev/null +++ b/vendor/github.com/openshift/api/image/docker10/zz_generated.swagger_doc_generated.go @@ -0,0 +1,30 @@ +package docker10 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_DockerConfig = map[string]string{ + "": "DockerConfig is the list of configuration options used when creating a container.", +} + +func (DockerConfig) SwaggerDoc() map[string]string { + return map_DockerConfig +} + +var map_DockerImage = map[string]string{ + "": "DockerImage is the type representing a container image and its various properties when retrieved from the Docker client API.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. 
These capabilities should not be used by applications needing long term support.", +} + +func (DockerImage) SwaggerDoc() map[string]string { + return map_DockerImage +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/image/dockerpre012/deepcopy.go b/vendor/github.com/openshift/api/image/dockerpre012/deepcopy.go new file mode 100644 index 000000000..ddeb4403c --- /dev/null +++ b/vendor/github.com/openshift/api/image/dockerpre012/deepcopy.go @@ -0,0 +1,18 @@ +package dockerpre012 + +// DeepCopyInto is manually built to copy the (probably bugged) time.Time +func (in *ImagePre012) DeepCopyInto(out *ImagePre012) { + *out = *in + out.Created = in.Created + in.ContainerConfig.DeepCopyInto(&out.ContainerConfig) + if in.Config != nil { + in, out := &in.Config, &out.Config + if *in == nil { + *out = nil + } else { + *out = new(Config) + (*in).DeepCopyInto(*out) + } + } + return +} diff --git a/vendor/github.com/openshift/api/image/dockerpre012/doc.go b/vendor/github.com/openshift/api/image/dockerpre012/doc.go new file mode 100644 index 000000000..e4a56260f --- /dev/null +++ b/vendor/github.com/openshift/api/image/dockerpre012/doc.go @@ -0,0 +1,4 @@ +// +k8s:deepcopy-gen=package,register + +// Package dockerpre012 is the dockerpre012 version of the API. +package dockerpre012 diff --git a/vendor/github.com/openshift/api/image/dockerpre012/register.go b/vendor/github.com/openshift/api/image/dockerpre012/register.go new file mode 100644 index 000000000..7ce2adb0a --- /dev/null +++ b/vendor/github.com/openshift/api/image/dockerpre012/register.go @@ -0,0 +1,46 @@ +package dockerpre012 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + GroupName = "image.openshift.io" + LegacyGroupName = "" +) + +var ( + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "pre012"} + LegacySchemeGroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: "pre012"} + + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + + LegacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes) + AddToSchemeInCoreGroup = LegacySchemeBuilder.AddToScheme + + // Install is a function which adds this version to a scheme + Install = SchemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &DockerImage{}, + ) + return nil +} + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(LegacySchemeGroupVersion, + &DockerImage{}, + ) + return nil +} diff --git a/vendor/github.com/openshift/api/image/dockerpre012/types_docker.go b/vendor/github.com/openshift/api/image/dockerpre012/types_docker.go new file mode 100644 index 000000000..1111892a9 --- /dev/null +++ b/vendor/github.com/openshift/api/image/dockerpre012/types_docker.go @@ -0,0 +1,140 @@ +package dockerpre012 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DockerImage is for earlier versions of the Docker API (pre-012 to be specific). It is also the +// version of metadata that the container image registry uses to persist metadata. 
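+//
+// Unlike the docker10 variant, this version keeps the pre-0.12 lowercase
+// wire keys, as the struct tags below show, e.g. (illustrative payload):
+//
+//	{"id": "sha256:abc", "docker_version": "0.11.1", "architecture": "amd64"}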
+// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type DockerImage struct { + metav1.TypeMeta `json:",inline"` + + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created metav1.Time `json:"created"` + Container string `json:"container,omitempty"` + ContainerConfig DockerConfig `json:"container_config,omitempty"` + DockerVersion string `json:"docker_version,omitempty"` + Author string `json:"author,omitempty"` + Config *DockerConfig `json:"config,omitempty"` + Architecture string `json:"architecture,omitempty"` + Size int64 `json:"size,omitempty"` +} + +// DockerConfig is the list of configuration options used when creating a container. +type DockerConfig struct { + Hostname string `json:"Hostname,omitempty"` + Domainname string `json:"Domainname,omitempty"` + User string `json:"User,omitempty"` + Memory int64 `json:"Memory,omitempty"` + MemorySwap int64 `json:"MemorySwap,omitempty"` + CPUShares int64 `json:"CpuShares,omitempty"` + CPUSet string `json:"Cpuset,omitempty"` + AttachStdin bool `json:"AttachStdin,omitempty"` + AttachStdout bool `json:"AttachStdout,omitempty"` + AttachStderr bool `json:"AttachStderr,omitempty"` + PortSpecs []string `json:"PortSpecs,omitempty"` + ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"` + Tty bool `json:"Tty,omitempty"` + OpenStdin bool `json:"OpenStdin,omitempty"` + StdinOnce bool `json:"StdinOnce,omitempty"` + Env []string `json:"Env,omitempty"` + Cmd []string `json:"Cmd,omitempty"` + DNS []string `json:"Dns,omitempty"` // For Docker API v1.9 and below only + Image string `json:"Image,omitempty"` + Volumes map[string]struct{} `json:"Volumes,omitempty"` + VolumesFrom string `json:"VolumesFrom,omitempty"` + WorkingDir string `json:"WorkingDir,omitempty"` + Entrypoint []string `json:"Entrypoint,omitempty"` + NetworkDisabled bool `json:"NetworkDisabled,omitempty"` + SecurityOpts []string `json:"SecurityOpts,omitempty"` + OnBuild []string `json:"OnBuild,omitempty"` + // This field is not supported in pre012 and will always be empty. + Labels map[string]string `json:"Labels,omitempty"` +} + +// ImagePre012 serves the same purpose as the Image type except that it is for +// earlier versions of the Docker API (pre-012 to be specific) +// Exists only for legacy conversion, copy of type from fsouza/go-dockerclient +type ImagePre012 struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + Container string `json:"container,omitempty"` + ContainerConfig Config `json:"container_config,omitempty"` + DockerVersion string `json:"docker_version,omitempty"` + Author string `json:"author,omitempty"` + Config *Config `json:"config,omitempty"` + Architecture string `json:"architecture,omitempty"` + Size int64 `json:"size,omitempty"` +} + +// Config is the list of configuration options used when creating a container. +// Config does not contain the options that are specific to starting a container on a +// given host. 
Those are contained in HostConfig
+// Exists only for legacy conversion, copy of type from fsouza/go-dockerclient
+type Config struct {
+ Hostname string `json:"Hostname,omitempty" yaml:"Hostname,omitempty"`
+ Domainname string `json:"Domainname,omitempty" yaml:"Domainname,omitempty"`
+ User string `json:"User,omitempty" yaml:"User,omitempty"`
+ Memory int64 `json:"Memory,omitempty" yaml:"Memory,omitempty"`
+ MemorySwap int64 `json:"MemorySwap,omitempty" yaml:"MemorySwap,omitempty"`
+ MemoryReservation int64 `json:"MemoryReservation,omitempty" yaml:"MemoryReservation,omitempty"`
+ KernelMemory int64 `json:"KernelMemory,omitempty" yaml:"KernelMemory,omitempty"`
+ PidsLimit int64 `json:"PidsLimit,omitempty" yaml:"PidsLimit,omitempty"`
+ CPUShares int64 `json:"CpuShares,omitempty" yaml:"CpuShares,omitempty"`
+ CPUSet string `json:"Cpuset,omitempty" yaml:"Cpuset,omitempty"`
+ AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"`
+ AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"`
+ AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"`
+ PortSpecs []string `json:"PortSpecs,omitempty" yaml:"PortSpecs,omitempty"`
+ ExposedPorts map[Port]struct{} `json:"ExposedPorts,omitempty" yaml:"ExposedPorts,omitempty"`
+ StopSignal string `json:"StopSignal,omitempty" yaml:"StopSignal,omitempty"`
+ Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"`
+ OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"`
+ StdinOnce bool `json:"StdinOnce,omitempty" yaml:"StdinOnce,omitempty"`
+ Env []string `json:"Env,omitempty" yaml:"Env,omitempty"`
+ Cmd []string `json:"Cmd" yaml:"Cmd"`
+ DNS []string `json:"Dns,omitempty" yaml:"Dns,omitempty"` // For Docker API v1.9 and below only
+ Image string `json:"Image,omitempty" yaml:"Image,omitempty"`
+ Volumes map[string]struct{} `json:"Volumes,omitempty" yaml:"Volumes,omitempty"`
+ VolumeDriver string `json:"VolumeDriver,omitempty" yaml:"VolumeDriver,omitempty"`
+ VolumesFrom string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"`
+ WorkingDir string `json:"WorkingDir,omitempty" yaml:"WorkingDir,omitempty"`
+ MacAddress string `json:"MacAddress,omitempty" yaml:"MacAddress,omitempty"`
+ Entrypoint []string `json:"Entrypoint" yaml:"Entrypoint"`
+ NetworkDisabled bool `json:"NetworkDisabled,omitempty" yaml:"NetworkDisabled,omitempty"`
+ SecurityOpts []string `json:"SecurityOpts,omitempty" yaml:"SecurityOpts,omitempty"`
+ OnBuild []string `json:"OnBuild,omitempty" yaml:"OnBuild,omitempty"`
+ Mounts []Mount `json:"Mounts,omitempty" yaml:"Mounts,omitempty"`
+ Labels map[string]string `json:"Labels,omitempty" yaml:"Labels,omitempty"`
+}
+
+// Mount represents a mount point in the container.
+//
+// It has been added in the version 1.20 of the Docker API, available since
+// Docker 1.8.
+// Exists only for legacy conversion, copy of type from fsouza/go-dockerclient
+type Mount struct {
+ Name string
+ Source string
+ Destination string
+ Driver string
+ Mode string
+ RW bool
+}
+
+// Port represents the port number and the protocol, in the form
+// <number>/<protocol>. For example: 80/tcp.
+// Exists only for legacy conversion, copy of type from fsouza/go-dockerclient +type Port string diff --git a/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.deepcopy.go new file mode 100644 index 000000000..0e8ecb20d --- /dev/null +++ b/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.deepcopy.go @@ -0,0 +1,217 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package dockerpre012 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Config) DeepCopyInto(out *Config) { + *out = *in + if in.PortSpecs != nil { + in, out := &in.PortSpecs, &out.PortSpecs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExposedPorts != nil { + in, out := &in.ExposedPorts, &out.ExposedPorts + *out = make(map[Port]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Cmd != nil { + in, out := &in.Cmd, &out.Cmd + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Entrypoint != nil { + in, out := &in.Entrypoint, &out.Entrypoint + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecurityOpts != nil { + in, out := &in.SecurityOpts, &out.SecurityOpts + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OnBuild != nil { + in, out := &in.OnBuild, &out.OnBuild + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Mounts != nil { + in, out := &in.Mounts, &out.Mounts + *out = make([]Mount, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DockerConfig) DeepCopyInto(out *DockerConfig) { + *out = *in + if in.PortSpecs != nil { + in, out := &in.PortSpecs, &out.PortSpecs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExposedPorts != nil { + in, out := &in.ExposedPorts, &out.ExposedPorts + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Cmd != nil { + in, out := &in.Cmd, &out.Cmd + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DNS != nil { + in, out := &in.DNS, &out.DNS + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make(map[string]struct{}, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Entrypoint != nil { + in, out := &in.Entrypoint, &out.Entrypoint + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecurityOpts != nil { + in, out := &in.SecurityOpts, &out.SecurityOpts + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OnBuild != nil { + in, out := &in.OnBuild, &out.OnBuild + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerConfig. +func (in *DockerConfig) DeepCopy() *DockerConfig { + if in == nil { + return nil + } + out := new(DockerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerImage) DeepCopyInto(out *DockerImage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Created.DeepCopyInto(&out.Created) + in.ContainerConfig.DeepCopyInto(&out.ContainerConfig) + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(DockerConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerImage. +func (in *DockerImage) DeepCopy() *DockerImage { + if in == nil { + return nil + } + out := new(DockerImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DockerImage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePre012. +func (in *ImagePre012) DeepCopy() *ImagePre012 { + if in == nil { + return nil + } + out := new(ImagePre012) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Mount) DeepCopyInto(out *Mount) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Mount. 
+func (in *Mount) DeepCopy() *Mount { + if in == nil { + return nil + } + out := new(Mount) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..04900e809 --- /dev/null +++ b/vendor/github.com/openshift/api/image/dockerpre012/zz_generated.swagger_doc_generated.go @@ -0,0 +1,55 @@ +package dockerpre012 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Config = map[string]string{ + "": "Config is the list of configuration options used when creating a container. Config does not contain the options that are specific to starting a container on a given host. Those are contained in HostConfig Exists only for legacy conversion, copy of type from fsouza/go-dockerclient", +} + +func (Config) SwaggerDoc() map[string]string { + return map_Config +} + +var map_DockerConfig = map[string]string{ + "": "DockerConfig is the list of configuration options used when creating a container.", + "Labels": "This field is not supported in pre012 and will always be empty.", +} + +func (DockerConfig) SwaggerDoc() map[string]string { + return map_DockerConfig +} + +var map_DockerImage = map[string]string{ + "": "DockerImage is for earlier versions of the Docker API (pre-012 to be specific). It is also the version of metadata that the container image registry uses to persist metadata.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", +} + +func (DockerImage) SwaggerDoc() map[string]string { + return map_DockerImage +} + +var map_ImagePre012 = map[string]string{ + "": "ImagePre012 serves the same purpose as the Image type except that it is for earlier versions of the Docker API (pre-012 to be specific) Exists only for legacy conversion, copy of type from fsouza/go-dockerclient", +} + +func (ImagePre012) SwaggerDoc() map[string]string { + return map_ImagePre012 +} + +var map_Mount = map[string]string{ + "": "Mount represents a mount point in the container.\n\nIt has been added in the version 1.20 of the Docker API, available since Docker 1.8. 
Exists only for legacy conversion, copy of type from fsouza/go-dockerclient", +} + +func (Mount) SwaggerDoc() map[string]string { + return map_Mount +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/image/install.go b/vendor/github.com/openshift/api/image/install.go new file mode 100644 index 000000000..5b146faa7 --- /dev/null +++ b/vendor/github.com/openshift/api/image/install.go @@ -0,0 +1,26 @@ +package image + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + imagev1 "github.com/openshift/api/image/v1" +) + +const ( + GroupName = "image.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(imagev1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/image/v1/consts.go b/vendor/github.com/openshift/api/image/v1/consts.go new file mode 100644 index 000000000..11f57a44a --- /dev/null +++ b/vendor/github.com/openshift/api/image/v1/consts.go @@ -0,0 +1,69 @@ +package v1 + +import corev1 "k8s.io/api/core/v1" + +const ( + // ManagedByOpenShiftAnnotation indicates that an image is managed by OpenShift's registry. + ManagedByOpenShiftAnnotation = "openshift.io/image.managed" + + // DockerImageRepositoryCheckAnnotation indicates that OpenShift has + // attempted to import tag and image information from an external Docker + // image repository. + DockerImageRepositoryCheckAnnotation = "openshift.io/image.dockerRepositoryCheck" + + // InsecureRepositoryAnnotation may be set true on an image stream to allow insecure access to pull content. + InsecureRepositoryAnnotation = "openshift.io/image.insecureRepository" + + // ExcludeImageSecretAnnotation indicates that a secret should not be returned by imagestream/secrets. + ExcludeImageSecretAnnotation = "openshift.io/image.excludeSecret" + + // DockerImageLayersOrderAnnotation describes layers order in the docker image. + DockerImageLayersOrderAnnotation = "image.openshift.io/dockerLayersOrder" + + // DockerImageLayersOrderAscending indicates that image layers are sorted in + // the order of their addition (from oldest to latest) + DockerImageLayersOrderAscending = "ascending" + + // DockerImageLayersOrderDescending indicates that layers are sorted in + // reversed order of their addition (from newest to oldest). + DockerImageLayersOrderDescending = "descending" + + // ImporterPreferArchAnnotation represents an architecture that should be + // selected if an image uses a manifest list and it should be + // downconverted. + ImporterPreferArchAnnotation = "importer.image.openshift.io/prefer-arch" + + // ImporterPreferOSAnnotation represents an operation system that should + // be selected if an image uses a manifest list and it should be + // downconverted. + ImporterPreferOSAnnotation = "importer.image.openshift.io/prefer-os" + + // ImageManifestBlobStoredAnnotation indicates that manifest and config blobs of image are stored in on + // storage of integrated Docker registry. + ImageManifestBlobStoredAnnotation = "image.openshift.io/manifestBlobStored" + + // DefaultImageTag is used when an image tag is needed and the configuration does not specify a tag to use. 
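+	// For illustration (hypothetical reference): a tag-less image reference
+	// such as "quay.io/example/app" resolves as
+	// "quay.io/example/app:" + DefaultImageTag.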
+	DefaultImageTag = "latest"
+
+	// ResourceImageStreams represents a number of image streams in a project.
+	ResourceImageStreams corev1.ResourceName = "openshift.io/imagestreams"
+
+	// ResourceImageStreamImages represents a number of unique references to images in all image stream
+	// statuses of a project.
+	ResourceImageStreamImages corev1.ResourceName = "openshift.io/images"
+
+	// ResourceImageStreamTags represents a number of unique references to images in all image stream specs
+	// of a project.
+	ResourceImageStreamTags corev1.ResourceName = "openshift.io/image-tags"
+
+	// Limit that applies to images. Used with a max["storage"] LimitRangeItem to set
+	// the maximum size of an image.
+	LimitTypeImage corev1.LimitType = "openshift.io/Image"
+
+	// Limit that applies to image streams. Used with a max[resource] LimitRangeItem to set the maximum
+	// number of a resource, where the resource is one of "openshift.io/images" and "openshift.io/image-tags".
+	LimitTypeImageStream corev1.LimitType = "openshift.io/ImageStream"
+
+	// The supported type of image signature.
+	ImageSignatureTypeAtomicImageV1 string = "AtomicImageV1"
+)
diff --git a/vendor/github.com/openshift/api/image/v1/doc.go b/vendor/github.com/openshift/api/image/v1/doc.go
new file mode 100644
index 000000000..e57d45bbf
--- /dev/null
+++ b/vendor/github.com/openshift/api/image/v1/doc.go
@@ -0,0 +1,8 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:conversion-gen=github.com/openshift/origin/pkg/image/apis/image
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true

+// +groupName=image.openshift.io
+// Package v1 is the v1 version of the API.
+package v1
diff --git a/vendor/github.com/openshift/api/image/v1/generated.pb.go b/vendor/github.com/openshift/api/image/v1/generated.pb.go
new file mode 100644
index 000000000..ac776ad64
--- /dev/null
+++ b/vendor/github.com/openshift/api/image/v1/generated.pb.go
@@ -0,0 +1,11572 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/openshift/api/image/v1/generated.proto
+
+package v1
+
+import (
+	fmt "fmt"
+
+	io "io"
+
+	proto "github.com/gogo/protobuf/proto"
+	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+	k8s_io_api_core_v1 "k8s.io/api/core/v1"
+	v11 "k8s.io/api/core/v1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	math "math"
+	math_bits "math/bits"
+	reflect "reflect"
+	strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
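+//
+// As a usage sketch (illustrative only; it assumes nothing beyond the standard
+// gogo/protobuf API that this file already imports), the generated types below
+// round-trip through proto.Marshal and proto.Unmarshal like any other message:
+//
+//	in := &DockerImageReference{Registry: "quay.io", Namespace: "openshift", Name: "example"}
+//	data, err := proto.Marshal(in)
+//	if err != nil {
+//		panic(err)
+//	}
+//	out := &DockerImageReference{}
+//	if err := proto.Unmarshal(data, out); err != nil {
+//		panic(err)
+//	}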
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *DockerImageReference) Reset() { *m = DockerImageReference{} } +func (*DockerImageReference) ProtoMessage() {} +func (*DockerImageReference) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{0} +} +func (m *DockerImageReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DockerImageReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DockerImageReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_DockerImageReference.Merge(m, src) +} +func (m *DockerImageReference) XXX_Size() int { + return m.Size() +} +func (m *DockerImageReference) XXX_DiscardUnknown() { + xxx_messageInfo_DockerImageReference.DiscardUnknown(m) +} + +var xxx_messageInfo_DockerImageReference proto.InternalMessageInfo + +func (m *Image) Reset() { *m = Image{} } +func (*Image) ProtoMessage() {} +func (*Image) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{1} +} +func (m *Image) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Image) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Image) XXX_Merge(src proto.Message) { + xxx_messageInfo_Image.Merge(m, src) +} +func (m *Image) XXX_Size() int { + return m.Size() +} +func (m *Image) XXX_DiscardUnknown() { + xxx_messageInfo_Image.DiscardUnknown(m) +} + +var xxx_messageInfo_Image proto.InternalMessageInfo + +func (m *ImageBlobReferences) Reset() { *m = ImageBlobReferences{} } +func (*ImageBlobReferences) ProtoMessage() {} +func (*ImageBlobReferences) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{2} +} +func (m *ImageBlobReferences) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageBlobReferences) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageBlobReferences) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageBlobReferences.Merge(m, src) +} +func (m *ImageBlobReferences) XXX_Size() int { + return m.Size() +} +func (m *ImageBlobReferences) XXX_DiscardUnknown() { + xxx_messageInfo_ImageBlobReferences.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageBlobReferences proto.InternalMessageInfo + +func (m *ImageImportSpec) Reset() { *m = ImageImportSpec{} } +func (*ImageImportSpec) ProtoMessage() {} +func (*ImageImportSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{3} +} +func (m *ImageImportSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageImportSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageImportSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageImportSpec.Merge(m, src) +} +func (m *ImageImportSpec) XXX_Size() int { + return m.Size() +} +func (m *ImageImportSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ImageImportSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageImportSpec proto.InternalMessageInfo + +func (m *ImageImportStatus) Reset() { *m = ImageImportStatus{} } +func 
(*ImageImportStatus) ProtoMessage() {} +func (*ImageImportStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{4} +} +func (m *ImageImportStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageImportStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageImportStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageImportStatus.Merge(m, src) +} +func (m *ImageImportStatus) XXX_Size() int { + return m.Size() +} +func (m *ImageImportStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ImageImportStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageImportStatus proto.InternalMessageInfo + +func (m *ImageLayer) Reset() { *m = ImageLayer{} } +func (*ImageLayer) ProtoMessage() {} +func (*ImageLayer) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{5} +} +func (m *ImageLayer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageLayer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageLayer) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageLayer.Merge(m, src) +} +func (m *ImageLayer) XXX_Size() int { + return m.Size() +} +func (m *ImageLayer) XXX_DiscardUnknown() { + xxx_messageInfo_ImageLayer.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageLayer proto.InternalMessageInfo + +func (m *ImageLayerData) Reset() { *m = ImageLayerData{} } +func (*ImageLayerData) ProtoMessage() {} +func (*ImageLayerData) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{6} +} +func (m *ImageLayerData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageLayerData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageLayerData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageLayerData.Merge(m, src) +} +func (m *ImageLayerData) XXX_Size() int { + return m.Size() +} +func (m *ImageLayerData) XXX_DiscardUnknown() { + xxx_messageInfo_ImageLayerData.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageLayerData proto.InternalMessageInfo + +func (m *ImageList) Reset() { *m = ImageList{} } +func (*ImageList) ProtoMessage() {} +func (*ImageList) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{7} +} +func (m *ImageList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageList.Merge(m, src) +} +func (m *ImageList) XXX_Size() int { + return m.Size() +} +func (m *ImageList) XXX_DiscardUnknown() { + xxx_messageInfo_ImageList.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageList proto.InternalMessageInfo + +func (m *ImageLookupPolicy) Reset() { *m = ImageLookupPolicy{} } +func (*ImageLookupPolicy) ProtoMessage() {} +func (*ImageLookupPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{8} +} +func (m *ImageLookupPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*ImageLookupPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageLookupPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageLookupPolicy.Merge(m, src) +} +func (m *ImageLookupPolicy) XXX_Size() int { + return m.Size() +} +func (m *ImageLookupPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_ImageLookupPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageLookupPolicy proto.InternalMessageInfo + +func (m *ImageManifest) Reset() { *m = ImageManifest{} } +func (*ImageManifest) ProtoMessage() {} +func (*ImageManifest) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{9} +} +func (m *ImageManifest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageManifest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageManifest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageManifest.Merge(m, src) +} +func (m *ImageManifest) XXX_Size() int { + return m.Size() +} +func (m *ImageManifest) XXX_DiscardUnknown() { + xxx_messageInfo_ImageManifest.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageManifest proto.InternalMessageInfo + +func (m *ImageSignature) Reset() { *m = ImageSignature{} } +func (*ImageSignature) ProtoMessage() {} +func (*ImageSignature) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{10} +} +func (m *ImageSignature) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageSignature) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageSignature) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageSignature.Merge(m, src) +} +func (m *ImageSignature) XXX_Size() int { + return m.Size() +} +func (m *ImageSignature) XXX_DiscardUnknown() { + xxx_messageInfo_ImageSignature.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageSignature proto.InternalMessageInfo + +func (m *ImageStream) Reset() { *m = ImageStream{} } +func (*ImageStream) ProtoMessage() {} +func (*ImageStream) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{11} +} +func (m *ImageStream) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStream) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStream.Merge(m, src) +} +func (m *ImageStream) XXX_Size() int { + return m.Size() +} +func (m *ImageStream) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStream.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStream proto.InternalMessageInfo + +func (m *ImageStreamImage) Reset() { *m = ImageStreamImage{} } +func (*ImageStreamImage) ProtoMessage() {} +func (*ImageStreamImage) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{12} +} +func (m *ImageStreamImage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamImage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m 
*ImageStreamImage) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamImage.Merge(m, src) +} +func (m *ImageStreamImage) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamImage) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamImage.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamImage proto.InternalMessageInfo + +func (m *ImageStreamImport) Reset() { *m = ImageStreamImport{} } +func (*ImageStreamImport) ProtoMessage() {} +func (*ImageStreamImport) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{13} +} +func (m *ImageStreamImport) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamImport) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamImport) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamImport.Merge(m, src) +} +func (m *ImageStreamImport) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamImport) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamImport.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamImport proto.InternalMessageInfo + +func (m *ImageStreamImportSpec) Reset() { *m = ImageStreamImportSpec{} } +func (*ImageStreamImportSpec) ProtoMessage() {} +func (*ImageStreamImportSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{14} +} +func (m *ImageStreamImportSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamImportSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamImportSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamImportSpec.Merge(m, src) +} +func (m *ImageStreamImportSpec) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamImportSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamImportSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamImportSpec proto.InternalMessageInfo + +func (m *ImageStreamImportStatus) Reset() { *m = ImageStreamImportStatus{} } +func (*ImageStreamImportStatus) ProtoMessage() {} +func (*ImageStreamImportStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{15} +} +func (m *ImageStreamImportStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamImportStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamImportStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamImportStatus.Merge(m, src) +} +func (m *ImageStreamImportStatus) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamImportStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamImportStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamImportStatus proto.InternalMessageInfo + +func (m *ImageStreamLayers) Reset() { *m = ImageStreamLayers{} } +func (*ImageStreamLayers) ProtoMessage() {} +func (*ImageStreamLayers) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{16} +} +func (m *ImageStreamLayers) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamLayers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if 
err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamLayers) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamLayers.Merge(m, src) +} +func (m *ImageStreamLayers) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamLayers) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamLayers.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamLayers proto.InternalMessageInfo + +func (m *ImageStreamList) Reset() { *m = ImageStreamList{} } +func (*ImageStreamList) ProtoMessage() {} +func (*ImageStreamList) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{17} +} +func (m *ImageStreamList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamList.Merge(m, src) +} +func (m *ImageStreamList) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamList) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamList.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamList proto.InternalMessageInfo + +func (m *ImageStreamMapping) Reset() { *m = ImageStreamMapping{} } +func (*ImageStreamMapping) ProtoMessage() {} +func (*ImageStreamMapping) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{18} +} +func (m *ImageStreamMapping) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamMapping) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamMapping) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamMapping.Merge(m, src) +} +func (m *ImageStreamMapping) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamMapping) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamMapping.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamMapping proto.InternalMessageInfo + +func (m *ImageStreamSpec) Reset() { *m = ImageStreamSpec{} } +func (*ImageStreamSpec) ProtoMessage() {} +func (*ImageStreamSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{19} +} +func (m *ImageStreamSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamSpec.Merge(m, src) +} +func (m *ImageStreamSpec) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamSpec proto.InternalMessageInfo + +func (m *ImageStreamStatus) Reset() { *m = ImageStreamStatus{} } +func (*ImageStreamStatus) ProtoMessage() {} +func (*ImageStreamStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{20} +} +func (m *ImageStreamStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m 
*ImageStreamStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamStatus.Merge(m, src) +} +func (m *ImageStreamStatus) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamStatus proto.InternalMessageInfo + +func (m *ImageStreamTag) Reset() { *m = ImageStreamTag{} } +func (*ImageStreamTag) ProtoMessage() {} +func (*ImageStreamTag) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{21} +} +func (m *ImageStreamTag) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamTag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamTag) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamTag.Merge(m, src) +} +func (m *ImageStreamTag) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamTag) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamTag.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamTag proto.InternalMessageInfo + +func (m *ImageStreamTagList) Reset() { *m = ImageStreamTagList{} } +func (*ImageStreamTagList) ProtoMessage() {} +func (*ImageStreamTagList) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{22} +} +func (m *ImageStreamTagList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageStreamTagList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageStreamTagList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageStreamTagList.Merge(m, src) +} +func (m *ImageStreamTagList) XXX_Size() int { + return m.Size() +} +func (m *ImageStreamTagList) XXX_DiscardUnknown() { + xxx_messageInfo_ImageStreamTagList.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageStreamTagList proto.InternalMessageInfo + +func (m *ImageTag) Reset() { *m = ImageTag{} } +func (*ImageTag) ProtoMessage() {} +func (*ImageTag) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{23} +} +func (m *ImageTag) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageTag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageTag) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageTag.Merge(m, src) +} +func (m *ImageTag) XXX_Size() int { + return m.Size() +} +func (m *ImageTag) XXX_DiscardUnknown() { + xxx_messageInfo_ImageTag.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageTag proto.InternalMessageInfo + +func (m *ImageTagList) Reset() { *m = ImageTagList{} } +func (*ImageTagList) ProtoMessage() {} +func (*ImageTagList) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{24} +} +func (m *ImageTagList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageTagList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageTagList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageTagList.Merge(m, src) +} +func (m *ImageTagList) XXX_Size() int { + return m.Size() +} +func (m *ImageTagList) XXX_DiscardUnknown() { + 
xxx_messageInfo_ImageTagList.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageTagList proto.InternalMessageInfo + +func (m *NamedTagEventList) Reset() { *m = NamedTagEventList{} } +func (*NamedTagEventList) ProtoMessage() {} +func (*NamedTagEventList) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{25} +} +func (m *NamedTagEventList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamedTagEventList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamedTagEventList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedTagEventList.Merge(m, src) +} +func (m *NamedTagEventList) XXX_Size() int { + return m.Size() +} +func (m *NamedTagEventList) XXX_DiscardUnknown() { + xxx_messageInfo_NamedTagEventList.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedTagEventList proto.InternalMessageInfo + +func (m *RepositoryImportSpec) Reset() { *m = RepositoryImportSpec{} } +func (*RepositoryImportSpec) ProtoMessage() {} +func (*RepositoryImportSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{26} +} +func (m *RepositoryImportSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RepositoryImportSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RepositoryImportSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_RepositoryImportSpec.Merge(m, src) +} +func (m *RepositoryImportSpec) XXX_Size() int { + return m.Size() +} +func (m *RepositoryImportSpec) XXX_DiscardUnknown() { + xxx_messageInfo_RepositoryImportSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_RepositoryImportSpec proto.InternalMessageInfo + +func (m *RepositoryImportStatus) Reset() { *m = RepositoryImportStatus{} } +func (*RepositoryImportStatus) ProtoMessage() {} +func (*RepositoryImportStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{27} +} +func (m *RepositoryImportStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RepositoryImportStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RepositoryImportStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_RepositoryImportStatus.Merge(m, src) +} +func (m *RepositoryImportStatus) XXX_Size() int { + return m.Size() +} +func (m *RepositoryImportStatus) XXX_DiscardUnknown() { + xxx_messageInfo_RepositoryImportStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_RepositoryImportStatus proto.InternalMessageInfo + +func (m *SecretList) Reset() { *m = SecretList{} } +func (*SecretList) ProtoMessage() {} +func (*SecretList) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{28} +} +func (m *SecretList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SecretList) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretList.Merge(m, src) +} +func (m *SecretList) XXX_Size() int { + return m.Size() +} +func (m *SecretList) XXX_DiscardUnknown() { + 
xxx_messageInfo_SecretList.DiscardUnknown(m) +} + +var xxx_messageInfo_SecretList proto.InternalMessageInfo + +func (m *SignatureCondition) Reset() { *m = SignatureCondition{} } +func (*SignatureCondition) ProtoMessage() {} +func (*SignatureCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{29} +} +func (m *SignatureCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignatureCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SignatureCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignatureCondition.Merge(m, src) +} +func (m *SignatureCondition) XXX_Size() int { + return m.Size() +} +func (m *SignatureCondition) XXX_DiscardUnknown() { + xxx_messageInfo_SignatureCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_SignatureCondition proto.InternalMessageInfo + +func (m *SignatureGenericEntity) Reset() { *m = SignatureGenericEntity{} } +func (*SignatureGenericEntity) ProtoMessage() {} +func (*SignatureGenericEntity) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{30} +} +func (m *SignatureGenericEntity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignatureGenericEntity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SignatureGenericEntity) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignatureGenericEntity.Merge(m, src) +} +func (m *SignatureGenericEntity) XXX_Size() int { + return m.Size() +} +func (m *SignatureGenericEntity) XXX_DiscardUnknown() { + xxx_messageInfo_SignatureGenericEntity.DiscardUnknown(m) +} + +var xxx_messageInfo_SignatureGenericEntity proto.InternalMessageInfo + +func (m *SignatureIssuer) Reset() { *m = SignatureIssuer{} } +func (*SignatureIssuer) ProtoMessage() {} +func (*SignatureIssuer) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{31} +} +func (m *SignatureIssuer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignatureIssuer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SignatureIssuer) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignatureIssuer.Merge(m, src) +} +func (m *SignatureIssuer) XXX_Size() int { + return m.Size() +} +func (m *SignatureIssuer) XXX_DiscardUnknown() { + xxx_messageInfo_SignatureIssuer.DiscardUnknown(m) +} + +var xxx_messageInfo_SignatureIssuer proto.InternalMessageInfo + +func (m *SignatureSubject) Reset() { *m = SignatureSubject{} } +func (*SignatureSubject) ProtoMessage() {} +func (*SignatureSubject) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{32} +} +func (m *SignatureSubject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignatureSubject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SignatureSubject) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignatureSubject.Merge(m, src) +} +func (m *SignatureSubject) XXX_Size() int { + return m.Size() +} +func (m *SignatureSubject) XXX_DiscardUnknown() { + 
xxx_messageInfo_SignatureSubject.DiscardUnknown(m) +} + +var xxx_messageInfo_SignatureSubject proto.InternalMessageInfo + +func (m *TagEvent) Reset() { *m = TagEvent{} } +func (*TagEvent) ProtoMessage() {} +func (*TagEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{33} +} +func (m *TagEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TagEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TagEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_TagEvent.Merge(m, src) +} +func (m *TagEvent) XXX_Size() int { + return m.Size() +} +func (m *TagEvent) XXX_DiscardUnknown() { + xxx_messageInfo_TagEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_TagEvent proto.InternalMessageInfo + +func (m *TagEventCondition) Reset() { *m = TagEventCondition{} } +func (*TagEventCondition) ProtoMessage() {} +func (*TagEventCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{34} +} +func (m *TagEventCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TagEventCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TagEventCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_TagEventCondition.Merge(m, src) +} +func (m *TagEventCondition) XXX_Size() int { + return m.Size() +} +func (m *TagEventCondition) XXX_DiscardUnknown() { + xxx_messageInfo_TagEventCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_TagEventCondition proto.InternalMessageInfo + +func (m *TagImportPolicy) Reset() { *m = TagImportPolicy{} } +func (*TagImportPolicy) ProtoMessage() {} +func (*TagImportPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{35} +} +func (m *TagImportPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TagImportPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TagImportPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_TagImportPolicy.Merge(m, src) +} +func (m *TagImportPolicy) XXX_Size() int { + return m.Size() +} +func (m *TagImportPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_TagImportPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_TagImportPolicy proto.InternalMessageInfo + +func (m *TagReference) Reset() { *m = TagReference{} } +func (*TagReference) ProtoMessage() {} +func (*TagReference) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{36} +} +func (m *TagReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TagReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TagReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_TagReference.Merge(m, src) +} +func (m *TagReference) XXX_Size() int { + return m.Size() +} +func (m *TagReference) XXX_DiscardUnknown() { + xxx_messageInfo_TagReference.DiscardUnknown(m) +} + +var xxx_messageInfo_TagReference proto.InternalMessageInfo + +func (m *TagReferencePolicy) Reset() { *m = TagReferencePolicy{} } +func (*TagReferencePolicy) ProtoMessage() {} 
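+// For illustration only (not generated code): once the init functions below
+// register these message types, a type can be resolved by its fully qualified
+// proto name at runtime; a minimal sketch, assuming the gogo/protobuf v1
+// registry API (proto.MessageType):
+//
+//	if t := proto.MessageType("github.com.openshift.api.image.v1.ImageStream"); t != nil {
+//		msg := reflect.New(t.Elem()).Interface().(proto.Message)
+//		_ = msg // a zero-value *ImageStream, ready for proto.Unmarshal
+//	}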
+func (*TagReferencePolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_650a0b34f65fde60, []int{37} +} +func (m *TagReferencePolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TagReferencePolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TagReferencePolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_TagReferencePolicy.Merge(m, src) +} +func (m *TagReferencePolicy) XXX_Size() int { + return m.Size() +} +func (m *TagReferencePolicy) XXX_DiscardUnknown() { + xxx_messageInfo_TagReferencePolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_TagReferencePolicy proto.InternalMessageInfo + +func init() { + proto.RegisterType((*DockerImageReference)(nil), "github.com.openshift.api.image.v1.DockerImageReference") + proto.RegisterType((*Image)(nil), "github.com.openshift.api.image.v1.Image") + proto.RegisterType((*ImageBlobReferences)(nil), "github.com.openshift.api.image.v1.ImageBlobReferences") + proto.RegisterType((*ImageImportSpec)(nil), "github.com.openshift.api.image.v1.ImageImportSpec") + proto.RegisterType((*ImageImportStatus)(nil), "github.com.openshift.api.image.v1.ImageImportStatus") + proto.RegisterType((*ImageLayer)(nil), "github.com.openshift.api.image.v1.ImageLayer") + proto.RegisterType((*ImageLayerData)(nil), "github.com.openshift.api.image.v1.ImageLayerData") + proto.RegisterType((*ImageList)(nil), "github.com.openshift.api.image.v1.ImageList") + proto.RegisterType((*ImageLookupPolicy)(nil), "github.com.openshift.api.image.v1.ImageLookupPolicy") + proto.RegisterType((*ImageManifest)(nil), "github.com.openshift.api.image.v1.ImageManifest") + proto.RegisterType((*ImageSignature)(nil), "github.com.openshift.api.image.v1.ImageSignature") + proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.image.v1.ImageSignature.SignedClaimsEntry") + proto.RegisterType((*ImageStream)(nil), "github.com.openshift.api.image.v1.ImageStream") + proto.RegisterType((*ImageStreamImage)(nil), "github.com.openshift.api.image.v1.ImageStreamImage") + proto.RegisterType((*ImageStreamImport)(nil), "github.com.openshift.api.image.v1.ImageStreamImport") + proto.RegisterType((*ImageStreamImportSpec)(nil), "github.com.openshift.api.image.v1.ImageStreamImportSpec") + proto.RegisterType((*ImageStreamImportStatus)(nil), "github.com.openshift.api.image.v1.ImageStreamImportStatus") + proto.RegisterType((*ImageStreamLayers)(nil), "github.com.openshift.api.image.v1.ImageStreamLayers") + proto.RegisterMapType((map[string]ImageLayerData)(nil), "github.com.openshift.api.image.v1.ImageStreamLayers.BlobsEntry") + proto.RegisterMapType((map[string]ImageBlobReferences)(nil), "github.com.openshift.api.image.v1.ImageStreamLayers.ImagesEntry") + proto.RegisterType((*ImageStreamList)(nil), "github.com.openshift.api.image.v1.ImageStreamList") + proto.RegisterType((*ImageStreamMapping)(nil), "github.com.openshift.api.image.v1.ImageStreamMapping") + proto.RegisterType((*ImageStreamSpec)(nil), "github.com.openshift.api.image.v1.ImageStreamSpec") + proto.RegisterType((*ImageStreamStatus)(nil), "github.com.openshift.api.image.v1.ImageStreamStatus") + proto.RegisterType((*ImageStreamTag)(nil), "github.com.openshift.api.image.v1.ImageStreamTag") + proto.RegisterType((*ImageStreamTagList)(nil), "github.com.openshift.api.image.v1.ImageStreamTagList") + proto.RegisterType((*ImageTag)(nil), "github.com.openshift.api.image.v1.ImageTag") + 
proto.RegisterType((*ImageTagList)(nil), "github.com.openshift.api.image.v1.ImageTagList") + proto.RegisterType((*NamedTagEventList)(nil), "github.com.openshift.api.image.v1.NamedTagEventList") + proto.RegisterType((*RepositoryImportSpec)(nil), "github.com.openshift.api.image.v1.RepositoryImportSpec") + proto.RegisterType((*RepositoryImportStatus)(nil), "github.com.openshift.api.image.v1.RepositoryImportStatus") + proto.RegisterType((*SecretList)(nil), "github.com.openshift.api.image.v1.SecretList") + proto.RegisterType((*SignatureCondition)(nil), "github.com.openshift.api.image.v1.SignatureCondition") + proto.RegisterType((*SignatureGenericEntity)(nil), "github.com.openshift.api.image.v1.SignatureGenericEntity") + proto.RegisterType((*SignatureIssuer)(nil), "github.com.openshift.api.image.v1.SignatureIssuer") + proto.RegisterType((*SignatureSubject)(nil), "github.com.openshift.api.image.v1.SignatureSubject") + proto.RegisterType((*TagEvent)(nil), "github.com.openshift.api.image.v1.TagEvent") + proto.RegisterType((*TagEventCondition)(nil), "github.com.openshift.api.image.v1.TagEventCondition") + proto.RegisterType((*TagImportPolicy)(nil), "github.com.openshift.api.image.v1.TagImportPolicy") + proto.RegisterType((*TagReference)(nil), "github.com.openshift.api.image.v1.TagReference") + proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.image.v1.TagReference.AnnotationsEntry") + proto.RegisterType((*TagReferencePolicy)(nil), "github.com.openshift.api.image.v1.TagReferencePolicy") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/image/v1/generated.proto", fileDescriptor_650a0b34f65fde60) +} + +var fileDescriptor_650a0b34f65fde60 = []byte{ + // 2691 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x5a, 0x4d, 0x6c, 0x1b, 0xc7, + 0x15, 0xf6, 0xf2, 0x4f, 0xd4, 0x13, 0x25, 0x59, 0x63, 0xcb, 0x61, 0x68, 0x47, 0x92, 0xd7, 0xb5, + 0xe1, 0x34, 0x0e, 0x19, 0xa9, 0x4e, 0x2a, 0xbb, 0x40, 0x1d, 0xd3, 0x74, 0x0d, 0xb6, 0x62, 0xac, + 0x8c, 0x58, 0xa3, 0x35, 0x5c, 0xa0, 0xab, 0xe5, 0x68, 0xb5, 0x15, 0xb9, 0xcb, 0xee, 0x2e, 0x95, + 0xc8, 0x68, 0x81, 0xa2, 0x28, 0x82, 0x1c, 0x7a, 0x68, 0xcf, 0x39, 0x16, 0x41, 0x51, 0x14, 0xe8, + 0xa5, 0x68, 0xd0, 0x53, 0x2f, 0x4d, 0x01, 0xa3, 0xa7, 0x20, 0xe8, 0x21, 0x97, 0x0a, 0xb1, 0xda, + 0x73, 0x6f, 0xbd, 0xf8, 0x54, 0xcc, 0xcf, 0xfe, 0x72, 0x29, 0xed, 0xaa, 0x16, 0xdb, 0xdc, 0xc8, + 0x79, 0xef, 0x7d, 0x6f, 0xe6, 0xbd, 0x37, 0xef, 0xbd, 0x99, 0x59, 0x58, 0xd6, 0x74, 0x67, 0x7b, + 0xb0, 0x59, 0x55, 0xcd, 0x5e, 0xcd, 0xec, 0x13, 0xc3, 0xde, 0xd6, 0xb7, 0x9c, 0x9a, 0xd2, 0xd7, + 0x6b, 0x7a, 0x4f, 0xd1, 0x48, 0x6d, 0x77, 0xb9, 0xa6, 0x11, 0x83, 0x58, 0x8a, 0x43, 0x3a, 0xd5, + 0xbe, 0x65, 0x3a, 0x26, 0xba, 0xe8, 0x8b, 0x54, 0x3d, 0x91, 0xaa, 0xd2, 0xd7, 0xab, 0x4c, 0xa4, + 0xba, 0xbb, 0x5c, 0x79, 0x35, 0x80, 0xaa, 0x99, 0x9a, 0x59, 0x63, 0x92, 0x9b, 0x83, 0x2d, 0xf6, + 0x8f, 0xfd, 0x61, 0xbf, 0x38, 0x62, 0x45, 0xde, 0x59, 0xb5, 0xab, 0xba, 0xc9, 0xd4, 0xaa, 0xa6, + 0x15, 0xa7, 0xb5, 0x72, 0xdd, 0xe7, 0xe9, 0x29, 0xea, 0xb6, 0x6e, 0x10, 0x6b, 0xaf, 0xd6, 0xdf, + 0xd1, 0xe8, 0x80, 0x5d, 0xeb, 0x11, 0x47, 0x89, 0x93, 0xaa, 0x8d, 0x92, 0xb2, 0x06, 0x86, 0xa3, + 0xf7, 0xc8, 0x90, 0xc0, 0x1b, 0x47, 0x09, 0xd8, 0xea, 0x36, 0xe9, 0x29, 0x51, 0x39, 0xf9, 0x53, + 0x09, 0xce, 0x36, 0x4c, 0x75, 0x87, 0x58, 0x4d, 0x6a, 0x04, 0x4c, 0xb6, 0x88, 0x45, 0x0c, 0x95, + 0xa0, 0x6b, 0x50, 0xb4, 0x88, 0xa6, 0xdb, 0x8e, 0xb5, 0x57, 0x96, 0x96, 0xa4, 0xab, 0x93, 0xf5, + 0xd3, 0x4f, 0xf6, 0x17, 0x4f, 0x1d, 
0xec, 0x2f, 0x16, 0xb1, 0x18, 0xc7, 0x1e, 0x07, 0xaa, 0xc1, + 0xa4, 0xa1, 0xf4, 0x88, 0xdd, 0x57, 0x54, 0x52, 0xce, 0x30, 0xf6, 0x39, 0xc1, 0x3e, 0xf9, 0x96, + 0x4b, 0xc0, 0x3e, 0x0f, 0x5a, 0x82, 0x1c, 0xfd, 0x53, 0xce, 0x32, 0xde, 0x92, 0xe0, 0xcd, 0x51, + 0x5e, 0xcc, 0x28, 0xe8, 0x25, 0xc8, 0x3a, 0x8a, 0x56, 0xce, 0x31, 0x86, 0x29, 0xc1, 0x90, 0x6d, + 0x2b, 0x1a, 0xa6, 0xe3, 0xa8, 0x02, 0x19, 0xbd, 0x51, 0xce, 0x33, 0x2a, 0x08, 0x6a, 0xa6, 0xd9, + 0xc0, 0x19, 0xbd, 0x21, 0xff, 0xad, 0x08, 0x79, 0xb6, 0x1c, 0xf4, 0x7d, 0x28, 0x52, 0x13, 0x77, + 0x14, 0x47, 0x61, 0xab, 0x98, 0x5a, 0x79, 0xad, 0xca, 0x2d, 0x55, 0x0d, 0x5a, 0xaa, 0xda, 0xdf, + 0xd1, 0xe8, 0x80, 0x5d, 0xa5, 0xdc, 0xd5, 0xdd, 0xe5, 0xea, 0xfd, 0xcd, 0x1f, 0x10, 0xd5, 0x69, + 0x11, 0x47, 0xa9, 0x23, 0x81, 0x0e, 0xfe, 0x18, 0xf6, 0x50, 0xd1, 0x3a, 0x9c, 0xed, 0xc4, 0xd8, + 0x4f, 0x18, 0xe1, 0x82, 0x90, 0x8d, 0xb5, 0x31, 0x8e, 0x95, 0x44, 0x3f, 0x82, 0x33, 0x81, 0xf1, + 0x96, 0x3b, 0xfd, 0x2c, 0x9b, 0xfe, 0xab, 0x23, 0xa7, 0x2f, 0x1c, 0x5d, 0xc5, 0xca, 0x3b, 0x77, + 0xdf, 0x75, 0x88, 0x61, 0xeb, 0xa6, 0x51, 0x3f, 0x2f, 0xf4, 0x9f, 0x69, 0x0c, 0x23, 0xe2, 0x38, + 0x35, 0x68, 0x13, 0x2a, 0x31, 0xc3, 0x0f, 0x88, 0x45, 0xf1, 0x84, 0x37, 0x64, 0x81, 0x5a, 0x69, + 0x8c, 0xe4, 0xc4, 0x87, 0xa0, 0xa0, 0x56, 0x78, 0x85, 0x8a, 0xa1, 0x6f, 0x11, 0xdb, 0x11, 0xce, + 0x8c, 0x9d, 0xb2, 0x60, 0xc1, 0x71, 0x72, 0x68, 0x17, 0xe6, 0x02, 0xc3, 0x6b, 0xca, 0x1e, 0xb1, + 0xec, 0x72, 0x61, 0x29, 0xcb, 0xcc, 0x75, 0xe4, 0xa6, 0xaf, 0xfa, 0x52, 0xf5, 0x17, 0x85, 0xee, + 0xb9, 0x46, 0x14, 0x0f, 0x0f, 0xab, 0x40, 0x04, 0xc0, 0xd6, 0x35, 0x43, 0x71, 0x06, 0x16, 0xb1, + 0xcb, 0x13, 0x4c, 0xe1, 0x72, 0x52, 0x85, 0x1b, 0xae, 0xa4, 0x1f, 0x5f, 0xde, 0x90, 0x8d, 0x03, + 0xc0, 0xe8, 0x3e, 0xcc, 0x07, 0x74, 0xfb, 0x4c, 0xe5, 0xe2, 0x52, 0xf6, 0x6a, 0xa9, 0xfe, 0xe2, + 0xc1, 0xfe, 0xe2, 0x7c, 0x23, 0x8e, 0x01, 0xc7, 0xcb, 0xa1, 0x6d, 0xb8, 0x10, 0x63, 0xc6, 0x16, + 0xe9, 0xe8, 0x4a, 0x7b, 0xaf, 0x4f, 0xca, 0x93, 0xcc, 0x0f, 0x5f, 0x12, 0xd3, 0xba, 0xd0, 0x38, + 0x84, 0x17, 0x1f, 0x8a, 0x84, 0xee, 0x85, 0x3c, 0x73, 0xc7, 0x34, 0xb6, 0x74, 0xad, 0x0c, 0x0c, + 0x3e, 0xce, 0xd4, 0x9c, 0x01, 0x0f, 0xcb, 0xa0, 0x9f, 0x4a, 0xa1, 0x6d, 0xe6, 0x6a, 0xb2, 0xcb, + 0x53, 0xcc, 0xea, 0xaf, 0x25, 0xb5, 0xba, 0x2b, 0x18, 0xbb, 0x31, 0x3d, 0x54, 0x1c, 0xab, 0x4b, + 0xfe, 0x58, 0x82, 0x33, 0x6c, 0xa8, 0xde, 0x35, 0x37, 0xbd, 0xfd, 0x6a, 0xa3, 0x55, 0x28, 0x31, + 0x2d, 0x2d, 0xdd, 0xb6, 0x75, 0x43, 0x63, 0x3b, 0xb5, 0x58, 0x3f, 0x2b, 0x34, 0x94, 0x9a, 0x01, + 0x1a, 0x0e, 0x71, 0x22, 0x19, 0x0a, 0x5d, 0x1e, 0xae, 0xd2, 0x52, 0x96, 0x26, 0xb2, 0x83, 0xfd, + 0xc5, 0x82, 0x08, 0x38, 0x41, 0xa1, 0x3c, 0x2a, 0x37, 0x1c, 0x4f, 0x29, 0x8c, 0x47, 0x58, 0x4a, + 0x50, 0xd0, 0x2b, 0x30, 0xd9, 0xf3, 0x4c, 0x92, 0x63, 0x50, 0xd3, 0x34, 0xf5, 0xfa, 0x2b, 0xf2, + 0xe9, 0xf2, 0x5f, 0xb2, 0x30, 0xcb, 0xe6, 0xd4, 0xec, 0xf5, 0x4d, 0xcb, 0xd9, 0xe8, 0x13, 0x15, + 0xdd, 0x85, 0xdc, 0x96, 0x65, 0xf6, 0x44, 0x8e, 0xbc, 0x14, 0x48, 0x32, 0x55, 0x5a, 0xd8, 0xfc, + 0x8c, 0xe8, 0x2d, 0xdb, 0xcf, 0xd9, 0xdf, 0xb0, 0xcc, 0x1e, 0x66, 0xe2, 0xe8, 0x4d, 0xc8, 0x38, + 0x26, 0x9b, 0xe7, 0xd4, 0xca, 0xd5, 0x38, 0x90, 0x35, 0x53, 0x55, 0xba, 0x51, 0xa4, 0x02, 0x4d, + 0xdd, 0x6d, 0x13, 0x67, 0x1c, 0x13, 0x75, 0xa9, 0x2d, 0xe9, 0xb4, 0xd6, 0xcd, 0xae, 0xae, 0xee, + 0x89, 0xac, 0xb7, 0x92, 0xc0, 0xbf, 0x6d, 0x45, 0x6b, 0x06, 0x24, 0x83, 0xf6, 0xf7, 0x47, 0x71, + 0x08, 0x1d, 0xbd, 0x0b, 0xb3, 0x96, 0x3b, 0x0d, 0xa1, 0x30, 0xcf, 0x14, 0xbe, 0x9e, 0x4c, 0x21, + 0x0e, 0x0b, 0xd7, 0x5f, 0x10, 0x3a, 0x67, 0x23, 0x04, 0x1c, 
0x55, 0x83, 0x6e, 0xc3, 0xac, 0x6e, + 0xa8, 0xdd, 0x41, 0xc7, 0x4f, 0x7f, 0x39, 0x16, 0x36, 0x1e, 0x44, 0x33, 0x4c, 0xc6, 0x51, 0x7e, + 0xf9, 0x77, 0x19, 0x98, 0x0b, 0xfa, 0xd1, 0x51, 0x9c, 0x81, 0x8d, 0xda, 0x50, 0xb0, 0xd9, 0x2f, + 0xe1, 0xcb, 0x6b, 0xc9, 0xea, 0x1d, 0x97, 0xae, 0xcf, 0x08, 0xed, 0x05, 0xfe, 0x1f, 0x0b, 0x2c, + 0xd4, 0x84, 0x3c, 0x5b, 0xb7, 0xe7, 0xdb, 0x84, 0xfb, 0xad, 0x3e, 0x79, 0xb0, 0xbf, 0xc8, 0x6b, + 0x31, 0xe6, 0x08, 0x6e, 0x5d, 0xcf, 0x8e, 0xa8, 0xeb, 0xdf, 0x8d, 0x86, 0x72, 0x1a, 0x6d, 0x5e, + 0xcf, 0x11, 0x1b, 0xf8, 0xef, 0x49, 0x00, 0x7e, 0xfe, 0xf6, 0x5a, 0x10, 0x69, 0x64, 0x0b, 0x72, + 0x19, 0x72, 0xb6, 0xfe, 0x98, 0x2f, 0x3a, 0xeb, 0x83, 0x33, 0xf1, 0x0d, 0xfd, 0x31, 0xc1, 0x8c, + 0x4c, 0x9b, 0x9f, 0x9e, 0x97, 0x3c, 0xb3, 0xe1, 0xe6, 0xc7, 0xcf, 0x94, 0x3e, 0x8f, 0xdc, 0x81, + 0x19, 0x7f, 0x1e, 0x0d, 0x5a, 0x75, 0x2f, 0x0a, 0x4d, 0x12, 0xd3, 0x34, 0x7d, 0xa4, 0x96, 0x4c, + 0x02, 0x2d, 0x7f, 0x94, 0x60, 0x92, 0xab, 0xd1, 0x6d, 0x07, 0x3d, 0x1a, 0xea, 0x84, 0xaa, 0xc9, + 0x22, 0x83, 0x4a, 0xb3, 0x3e, 0xc8, 0xeb, 0xff, 0xdc, 0x91, 0x40, 0x17, 0xd4, 0x82, 0xbc, 0xee, + 0x90, 0x9e, 0x5d, 0xce, 0xa4, 0xf4, 0xd8, 0xb4, 0x00, 0xcd, 0x37, 0xa9, 0x38, 0xe6, 0x28, 0xf2, + 0xaa, 0x88, 0xec, 0x35, 0xd3, 0xdc, 0x19, 0xf4, 0xc5, 0x96, 0xb9, 0x04, 0xf9, 0x2e, 0x4d, 0x1f, + 0x22, 0xbf, 0x7a, 0x92, 0x2c, 0xa7, 0x60, 0x4e, 0x93, 0x7f, 0x95, 0x81, 0xe9, 0x70, 0x77, 0x70, + 0x05, 0x0a, 0x1d, 0x5d, 0xa3, 0x1b, 0x8c, 0x3b, 0xda, 0x0b, 0xf1, 0x06, 0x1b, 0xc5, 0x82, 0x9a, + 0xda, 0xbe, 0x34, 0xed, 0xbb, 0xb1, 0x45, 0xdd, 0xc4, 0xa6, 0x95, 0xf5, 0xd3, 0x4e, 0x2b, 0x40, + 0xc3, 0x21, 0x4e, 0x2a, 0xa9, 0x58, 0xea, 0xb6, 0xee, 0x10, 0x95, 0x56, 0x64, 0xd1, 0x55, 0x79, + 0x92, 0xb7, 0x03, 0x34, 0x1c, 0xe2, 0xa4, 0x5d, 0xaf, 0x69, 0x47, 0xbb, 0xde, 0xfb, 0x1b, 0x38, + 0x63, 0xda, 0xe8, 0x65, 0x98, 0xd8, 0x55, 0x2c, 0x5d, 0x31, 0x9c, 0x72, 0x81, 0x31, 0xcc, 0x0a, + 0x86, 0x89, 0x07, 0x7c, 0x18, 0xbb, 0x74, 0xf9, 0xf7, 0x05, 0x11, 0x81, 0x5e, 0x57, 0x30, 0x86, + 0x4e, 0x79, 0x09, 0x72, 0x8e, 0x6f, 0x5b, 0x6f, 0xbf, 0x31, 0xb3, 0x32, 0x0a, 0xba, 0x0c, 0x13, + 0xaa, 0x69, 0x38, 0xc4, 0x70, 0x98, 0x31, 0x4b, 0xf5, 0x29, 0x3a, 0xfb, 0x3b, 0x7c, 0x08, 0xbb, + 0x34, 0xa4, 0x03, 0xa8, 0xa6, 0xd1, 0xd1, 0x1d, 0xdd, 0x34, 0xdc, 0x1c, 0x91, 0x24, 0x61, 0x7b, + 0x8b, 0xbd, 0xe3, 0x4a, 0xfb, 0x33, 0xf6, 0x86, 0x6c, 0x1c, 0x00, 0x47, 0x5f, 0x83, 0x69, 0x26, + 0xde, 0xec, 0x10, 0xc3, 0xd1, 0x9d, 0x3d, 0x61, 0xfa, 0x79, 0x21, 0xc6, 0x43, 0xcd, 0x25, 0xe2, + 0x30, 0x2f, 0xfa, 0x31, 0x94, 0x68, 0x1b, 0x47, 0x3a, 0x77, 0xba, 0x8a, 0xde, 0x73, 0x5b, 0xd2, + 0x3b, 0xa9, 0x3b, 0x44, 0x36, 0x71, 0x17, 0xe5, 0xae, 0xe1, 0x58, 0x81, 0xe2, 0x16, 0x24, 0xe1, + 0x90, 0x3a, 0xf4, 0x36, 0x4c, 0xa8, 0x16, 0xa1, 0x67, 0xbd, 0xf2, 0x04, 0x73, 0xe8, 0x97, 0x93, + 0x39, 0xb4, 0xad, 0xf7, 0x88, 0xb0, 0x3c, 0x17, 0xc7, 0x2e, 0x0e, 0x4d, 0x22, 0xba, 0x6d, 0x0f, + 0x48, 0xa7, 0xbe, 0x57, 0x2e, 0x26, 0xae, 0xcc, 0xde, 0x42, 0x9a, 0x54, 0xd6, 0xaa, 0x97, 0x68, + 0x12, 0x69, 0x0a, 0x1c, 0xec, 0x21, 0xa2, 0xef, 0xb9, 0xe8, 0x6d, 0x93, 0xf5, 0xa0, 0x53, 0x2b, + 0x5f, 0x49, 0x83, 0xbe, 0x31, 0x60, 0x51, 0x17, 0x84, 0x6f, 0x9b, 0xd8, 0x83, 0xac, 0xdc, 0x82, + 0xb9, 0x21, 0x43, 0xa2, 0xd3, 0x90, 0xdd, 0x21, 0xe2, 0x84, 0x8b, 0xe9, 0x4f, 0x74, 0x16, 0xf2, + 0xbb, 0x4a, 0x77, 0x20, 0xe2, 0x14, 0xf3, 0x3f, 0x37, 0x33, 0xab, 0x12, 0xcd, 0x2d, 0x53, 0xdc, + 0x33, 0x8e, 0x45, 0x94, 0xde, 0x18, 0xb6, 0x4c, 0x1b, 0x72, 0x76, 0x9f, 0xa8, 0xa2, 0xea, 0xae, + 0x24, 0x8e, 0x1c, 0x36, 0x3f, 0xda, 0xd8, 0xf9, 0xdb, 0x8c, 0xfe, 0xc3, 0x0c, 0x0d, 
0x3d, 0xf2, + 0x5a, 0x04, 0xde, 0x5d, 0x5d, 0x4f, 0x89, 0x7b, 0x68, 0xab, 0x20, 0xff, 0x59, 0x82, 0xd3, 0x01, + 0xee, 0x71, 0x9d, 0xc3, 0x5b, 0xc7, 0xed, 0x50, 0xfc, 0x0a, 0x14, 0xe8, 0x52, 0xe4, 0x3f, 0xb8, + 0xcd, 0x95, 0xbb, 0x0a, 0xda, 0x62, 0x8d, 0x61, 0x19, 0x0f, 0x43, 0x1e, 0x5f, 0x4d, 0xe7, 0x19, + 0xbf, 0xa1, 0x8f, 0xf5, 0xfb, 0x66, 0xc4, 0xef, 0x37, 0x8f, 0x85, 0x7e, 0xb8, 0xf7, 0x7f, 0x96, + 0x81, 0xf9, 0xd8, 0x19, 0xd1, 0x3a, 0xcc, 0x7b, 0x6f, 0x66, 0xb9, 0xa2, 0x8f, 0xc0, 0x79, 0xb0, + 0xa0, 0x22, 0x0d, 0xc0, 0x22, 0x7d, 0xd3, 0xd6, 0x1d, 0xd3, 0xda, 0x13, 0x76, 0xf8, 0x6a, 0x82, + 0x99, 0x62, 0x4f, 0x28, 0x60, 0x86, 0x19, 0x6a, 0x68, 0x9f, 0x82, 0x03, 0xd0, 0xe8, 0x21, 0x9d, + 0x90, 0xa2, 0x11, 0x6a, 0x8e, 0x6c, 0x9a, 0xed, 0x15, 0xc4, 0xf7, 0x17, 0x41, 0x91, 0xb0, 0x40, + 0x94, 0x3f, 0xca, 0xc0, 0x0b, 0x23, 0x4c, 0x87, 0x70, 0xc8, 0x10, 0xb4, 0x0f, 0x4b, 0xe5, 0x06, + 0x7e, 0x00, 0x8c, 0x18, 0x4d, 0x8f, 0x31, 0xda, 0x8d, 0xe3, 0x18, 0x4d, 0x78, 0xf7, 0x10, 0xb3, + 0x3d, 0x8a, 0x98, 0xed, 0x7a, 0x4a, 0xb3, 0x45, 0xe2, 0x27, 0x62, 0xb8, 0x0f, 0x73, 0xa1, 0x7d, + 0x27, 0x6e, 0x5a, 0x4e, 0x7e, 0xdf, 0x75, 0x20, 0xbf, 0xd9, 0x35, 0x37, 0xdd, 0x06, 0xf6, 0x56, + 0x3a, 0x9f, 0xf0, 0x69, 0x56, 0xeb, 0x14, 0x81, 0x17, 0x68, 0x2f, 0xab, 0xb0, 0x31, 0xcc, 0xc1, + 0xd1, 0x76, 0xc4, 0x76, 0x6f, 0x1e, 0x4b, 0x0d, 0x37, 0x19, 0xd7, 0x33, 0xc2, 0x8e, 0x95, 0x1d, + 0x00, 0x7f, 0x36, 0x31, 0x55, 0xee, 0x5e, 0xb0, 0xca, 0xa5, 0xb8, 0xb6, 0xf2, 0x8e, 0x2c, 0x81, + 0xc2, 0x58, 0xf9, 0xa1, 0xa8, 0x8b, 0x23, 0xb5, 0xad, 0x85, 0xb5, 0xbd, 0x91, 0x38, 0x39, 0x87, + 0x2e, 0x5a, 0x82, 0xb5, 0xf8, 0x63, 0x49, 0x5c, 0x62, 0x08, 0xcb, 0x9c, 0xfc, 0x11, 0x67, 0x23, + 0x7c, 0xc4, 0x49, 0xbb, 0x6b, 0xe3, 0x0f, 0x3a, 0xff, 0x94, 0x00, 0x05, 0xb8, 0x5a, 0x4a, 0xbf, + 0xaf, 0x1b, 0xda, 0x17, 0xae, 0x5c, 0x1e, 0x71, 0xa8, 0x97, 0x7f, 0x93, 0x09, 0x79, 0x8b, 0xd5, + 0x03, 0x03, 0x4a, 0xdd, 0xc0, 0xf1, 0x2e, 0x6d, 0x2f, 0x12, 0x3c, 0x1a, 0xfa, 0xed, 0x70, 0x70, + 0x14, 0x87, 0xf0, 0xd1, 0x46, 0xe8, 0x1a, 0xd5, 0x4f, 0x6e, 0xe2, 0x58, 0xf8, 0x92, 0x80, 0x98, + 0x6f, 0xc4, 0x31, 0xe1, 0x78, 0x59, 0xf4, 0x36, 0xe4, 0x1c, 0x45, 0x73, 0x63, 0xa2, 0x96, 0xf2, + 0xd6, 0x28, 0x70, 0x08, 0x52, 0x34, 0x1b, 0x33, 0x28, 0xf9, 0xd7, 0xe1, 0xce, 0x43, 0x14, 0x8d, + 0x13, 0x99, 0x3d, 0x81, 0xf3, 0xfd, 0xc1, 0x66, 0x57, 0x57, 0x63, 0xa5, 0x84, 0x37, 0x2f, 0x09, + 0xe8, 0xf3, 0xeb, 0xa3, 0x59, 0xf1, 0x61, 0x38, 0xe8, 0x41, 0xc8, 0x48, 0x49, 0x3c, 0xfc, 0x96, + 0xd2, 0x23, 0x9d, 0xb6, 0xa2, 0xdd, 0xdd, 0x25, 0x86, 0x43, 0xf7, 0x62, 0xac, 0xa5, 0x3e, 0xc8, + 0xb9, 0xa7, 0x58, 0x66, 0xa9, 0xb6, 0x32, 0x8e, 0x8d, 0xf3, 0x4d, 0x1e, 0xe9, 0x7c, 0xdb, 0xa4, + 0x76, 0xf8, 0x44, 0xe8, 0xae, 0x6b, 0x05, 0x40, 0xbc, 0xc7, 0xe9, 0xa6, 0x21, 0xee, 0x0f, 0x3c, + 0xed, 0xf7, 0x3c, 0x0a, 0x0e, 0x70, 0x0d, 0x6d, 0x9b, 0xc2, 0x09, 0x6f, 0x9b, 0xed, 0x98, 0xc3, + 0xf6, 0xf5, 0x64, 0xcb, 0x66, 0xde, 0x4b, 0x7e, 0xd6, 0xf6, 0x52, 0x52, 0xfe, 0xb9, 0x74, 0xf0, + 0x7f, 0x0d, 0xa7, 0xd6, 0xb6, 0xa2, 0x8d, 0xa1, 0x48, 0x3c, 0x08, 0x17, 0x89, 0xe5, 0x74, 0x45, + 0xa2, 0xad, 0x68, 0x23, 0xea, 0xc4, 0xe7, 0x19, 0x28, 0x32, 0xc6, 0xf1, 0x04, 0x79, 0x2b, 0x74, + 0x0a, 0x49, 0x1d, 0xe5, 0xc5, 0xc8, 0xc1, 0xe3, 0x3b, 0xc7, 0x38, 0x70, 0x0e, 0xa7, 0x00, 0x38, + 0xec, 0x5e, 0x3a, 0xf7, 0xdf, 0xde, 0x4b, 0xcb, 0x7f, 0x92, 0xa0, 0xe4, 0x9a, 0x78, 0x0c, 0x91, + 0xb2, 0x1e, 0x8e, 0x94, 0x57, 0x92, 0xce, 0x7c, 0x74, 0x8c, 0xfc, 0x4b, 0x82, 0xb9, 0x21, 0xab, + 0xb9, 0x95, 0x59, 0x1a, 0x71, 0xdd, 0x7e, 0x8c, 0x69, 0xb8, 0xf0, 0xf1, 0xd3, 0x88, 0x24, 0x8c, + 0xec, 0xc9, 
0x25, 0x0c, 0xf9, 0xfd, 0x2c, 0x9c, 0x8d, 0x3b, 0xf5, 0x3d, 0xaf, 0xd7, 0xac, 0xe8, + 0x5b, 0x54, 0x66, 0xdc, 0x6f, 0x51, 0xb9, 0xff, 0xd9, 0x5b, 0x54, 0x36, 0xe5, 0x5b, 0xd4, 0xfb, + 0x19, 0x38, 0x17, 0x7f, 0x96, 0x3c, 0xa1, 0x07, 0x29, 0xff, 0x14, 0x9a, 0x79, 0xfe, 0xa7, 0x50, + 0x74, 0x13, 0x66, 0x94, 0x0e, 0x0f, 0x33, 0xa5, 0x4b, 0x3b, 0x0e, 0x16, 0xc7, 0x93, 0x75, 0x74, + 0xb0, 0xbf, 0x38, 0x73, 0x3b, 0x44, 0xc1, 0x11, 0x4e, 0xf9, 0xb7, 0x12, 0xc0, 0x06, 0x51, 0x2d, + 0xe2, 0x8c, 0x21, 0x8b, 0xdc, 0x0a, 0x6f, 0xdf, 0x4a, 0x5c, 0xa8, 0xf3, 0xc9, 0x8c, 0x48, 0x1a, + 0x9f, 0x66, 0x01, 0x0d, 0xdf, 0x8b, 0xa3, 0x9b, 0xe2, 0xae, 0x9e, 0xa7, 0x8d, 0x2b, 0xc1, 0xbb, + 0xfa, 0x67, 0xfb, 0x8b, 0xe7, 0x86, 0x25, 0x02, 0xb7, 0xf8, 0x6b, 0x9e, 0xc3, 0xf9, 0x4d, 0xff, + 0xf5, 0xb0, 0x0b, 0x9f, 0xed, 0x2f, 0xc6, 0x7c, 0x37, 0x55, 0xf5, 0x90, 0x22, 0x8e, 0xd6, 0x60, + 0xba, 0xab, 0xd8, 0xce, 0xba, 0x65, 0x6e, 0x92, 0xb6, 0x2e, 0xbe, 0x18, 0x4a, 0x77, 0x97, 0xed, + 0xdd, 0xd6, 0xaf, 0x05, 0x81, 0x70, 0x18, 0x17, 0xed, 0x02, 0xa2, 0x03, 0x6d, 0x4b, 0x31, 0x6c, + 0xbe, 0x24, 0xaa, 0x2d, 0x97, 0x5a, 0x5b, 0x45, 0x68, 0x43, 0x6b, 0x43, 0x68, 0x38, 0x46, 0x03, + 0xba, 0x02, 0x05, 0x8b, 0x28, 0xb6, 0x69, 0x88, 0xb7, 0x05, 0x2f, 0x26, 0x31, 0x1b, 0xc5, 0x82, + 0x8a, 0x5e, 0x86, 0x89, 0x1e, 0xb1, 0x6d, 0x5a, 0xec, 0x22, 0xcf, 0x3b, 0x2d, 0x3e, 0x8c, 0x5d, + 0xba, 0xfc, 0x9e, 0x04, 0xbe, 0x8b, 0x58, 0x1f, 0xa9, 0xab, 0x77, 0xf9, 0x9b, 0xc4, 0x2a, 0x94, + 0x4c, 0x4b, 0x53, 0x0c, 0xfd, 0x31, 0x6f, 0x3a, 0xa5, 0xf0, 0xd3, 0xd3, 0xfd, 0x00, 0x0d, 0x87, + 0x38, 0x69, 0xb3, 0xaa, 0x9a, 0xbd, 0x9e, 0x69, 0xd0, 0x1a, 0x23, 0x5c, 0x1b, 0xc8, 0xd0, 0x2e, + 0x05, 0x07, 0xb8, 0xe4, 0x0f, 0x25, 0x98, 0x8d, 0xdc, 0xfe, 0xa3, 0x5f, 0x4a, 0x70, 0xce, 0x8e, + 0x9d, 0x9c, 0xd8, 0x1f, 0x37, 0xd2, 0x5c, 0xfa, 0x87, 0x00, 0xea, 0x0b, 0x62, 0x3e, 0x23, 0x56, + 0x8f, 0x47, 0x28, 0x96, 0xff, 0x2e, 0xc1, 0xe9, 0xe8, 0x3b, 0xc2, 0xff, 0xe3, 0x44, 0xd1, 0xeb, + 0x30, 0xc5, 0x4f, 0x5a, 0xdf, 0x22, 0x7b, 0xcd, 0x86, 0xf0, 0xc2, 0x19, 0x01, 0x36, 0xb5, 0xee, + 0x93, 0x70, 0x90, 0x4f, 0xfe, 0x79, 0x06, 0x8a, 0x6e, 0x7d, 0x45, 0xdf, 0xf6, 0xdf, 0x85, 0xa4, + 0xd4, 0xd1, 0xed, 0x05, 0xdd, 0xd0, 0xdb, 0xd0, 0xf3, 0xff, 0x10, 0xee, 0x92, 0xdb, 0xdc, 0xf1, + 0x83, 0x68, 0xfc, 0xcd, 0x43, 0xf8, 0x0c, 0x95, 0x4b, 0x72, 0x86, 0x92, 0x3f, 0xc8, 0xc2, 0xdc, + 0x50, 0xbb, 0x81, 0x6e, 0x84, 0x72, 0xde, 0xe5, 0x48, 0xce, 0x9b, 0x1f, 0x12, 0x38, 0xb1, 0x94, + 0x17, 0x9f, 0x89, 0xb2, 0x63, 0xcc, 0x44, 0xb9, 0xa4, 0x99, 0x28, 0x7f, 0x78, 0x26, 0x8a, 0x78, + 0xa7, 0x90, 0xc8, 0x3b, 0x1f, 0x49, 0x30, 0x1b, 0x69, 0xa0, 0xd0, 0x35, 0x28, 0xea, 0x86, 0x4d, + 0xd4, 0x81, 0x45, 0xc4, 0xf3, 0x81, 0x57, 0x15, 0x9b, 0x62, 0x1c, 0x7b, 0x1c, 0xa8, 0x06, 0x93, + 0xb6, 0xba, 0x4d, 0x3a, 0x83, 0x2e, 0xe9, 0x30, 0x8f, 0x14, 0xfd, 0xa7, 0xfc, 0x0d, 0x97, 0x80, + 0x7d, 0x1e, 0xd4, 0x00, 0xe0, 0xbd, 0x58, 0xcb, 0xec, 0xb8, 0xe1, 0xe6, 0x7e, 0xff, 0x06, 0x4d, + 0x8f, 0xf2, 0x6c, 0x7f, 0x71, 0xc6, 0xff, 0xc7, 0xfc, 0x1f, 0x90, 0x93, 0xff, 0x9d, 0x83, 0x52, + 0xb0, 0x11, 0x4b, 0xf0, 0x85, 0xc9, 0x3b, 0x30, 0xa5, 0x18, 0x86, 0xe9, 0x28, 0xbc, 0x5b, 0xce, + 0x24, 0xbe, 0x15, 0x0e, 0xea, 0xa9, 0xde, 0xf6, 0x21, 0xf8, 0xad, 0xb0, 0x97, 0x11, 0x02, 0x14, + 0x1c, 0xd4, 0x84, 0x6e, 0x8b, 0x16, 0x39, 0x9b, 0xbc, 0x45, 0x2e, 0x46, 0xda, 0xe3, 0x1a, 0x4c, + 0x7a, 0x9d, 0xa4, 0xf8, 0x78, 0xc9, 0xb3, 0xb2, 0xbf, 0xb5, 0x7d, 0x1e, 0x54, 0x0d, 0x05, 0x43, + 0x9e, 0x05, 0xc3, 0xcc, 0x21, 0x57, 0x1d, 0xd1, 0xfe, 0xbb, 0x30, 0xee, 0xfe, 0x7b, 0x62, 0x2c, + 0xfd, 0x77, 0xe5, 0xeb, 0x70, 0x3a, 
0xea, 0xc1, 0x54, 0xef, 0xd2, 0xeb, 0x80, 0x86, 0xf5, 0x1f, + 0xd5, 0xc2, 0x0d, 0x4b, 0xf8, 0xf9, 0xac, 0x7e, 0xef, 0xc9, 0xd3, 0x85, 0x53, 0x9f, 0x3c, 0x5d, + 0x38, 0xf5, 0xd9, 0xd3, 0x85, 0x53, 0x3f, 0x39, 0x58, 0x90, 0x9e, 0x1c, 0x2c, 0x48, 0x9f, 0x1c, + 0x2c, 0x48, 0x9f, 0x1d, 0x2c, 0x48, 0x9f, 0x1f, 0x2c, 0x48, 0xbf, 0xf8, 0xc7, 0xc2, 0xa9, 0x87, + 0x17, 0x8f, 0xfc, 0x06, 0xff, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xd0, 0xa8, 0x38, 0xe0, 0xa7, + 0x2f, 0x00, 0x00, +} + +func (m *DockerImageReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DockerImageReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DockerImageReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ID) + copy(dAtA[i:], m.ID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ID))) + i-- + dAtA[i] = 0x2a + i -= len(m.Tag) + copy(dAtA[i:], m.Tag) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tag))) + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.Registry) + copy(dAtA[i:], m.Registry) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Registry))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Image) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Image) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Image) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.DockerImageManifests) > 0 { + for iNdEx := len(m.DockerImageManifests) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.DockerImageManifests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + } + i -= len(m.DockerImageConfig) + copy(dAtA[i:], m.DockerImageConfig) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageConfig))) + i-- + dAtA[i] = 0x52 + i -= len(m.DockerImageManifestMediaType) + copy(dAtA[i:], m.DockerImageManifestMediaType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageManifestMediaType))) + i-- + dAtA[i] = 0x4a + if len(m.DockerImageSignatures) > 0 { + for iNdEx := len(m.DockerImageSignatures) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DockerImageSignatures[iNdEx]) + copy(dAtA[i:], m.DockerImageSignatures[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageSignatures[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if len(m.Signatures) > 0 { + for iNdEx := len(m.Signatures) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Signatures[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if len(m.DockerImageLayers) > 0 { + for iNdEx := len(m.DockerImageLayers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.DockerImageLayers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i -= len(m.DockerImageManifest) + copy(dAtA[i:], m.DockerImageManifest) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageManifest))) + i-- + dAtA[i] = 0x2a + i -= len(m.DockerImageMetadataVersion) + copy(dAtA[i:], m.DockerImageMetadataVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageMetadataVersion))) + i-- + dAtA[i] = 0x22 + { + size, err := m.DockerImageMetadata.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.DockerImageReference) + copy(dAtA[i:], m.DockerImageReference) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageReference))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageBlobReferences) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageBlobReferences) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageBlobReferences) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Manifests) > 0 { + for iNdEx := len(m.Manifests) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Manifests[iNdEx]) + copy(dAtA[i:], m.Manifests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Manifests[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i-- + if m.ImageMissing { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + if m.Config != nil { + i -= len(*m.Config) + copy(dAtA[i:], *m.Config) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Config))) + i-- + dAtA[i] = 0x12 + } + if len(m.Layers) > 0 { + for iNdEx := len(m.Layers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Layers[iNdEx]) + copy(dAtA[i:], m.Layers[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Layers[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ImageImportSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageImportSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageImportSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ReferencePolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + i-- + if m.IncludeManifest { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + { + size, err := m.ImportPolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.To != nil { + { + size, err := m.To.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, 
i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageImportStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageImportStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageImportStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Manifests) > 0 { + for iNdEx := len(m.Manifests) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Manifests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Tag) + copy(dAtA[i:], m.Tag) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tag))) + i-- + dAtA[i] = 0x1a + if m.Image != nil { + { + size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageLayer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageLayer) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageLayer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.MediaType) + copy(dAtA[i:], m.MediaType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MediaType))) + i-- + dAtA[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(m.LayerSize)) + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageLayerData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageLayerData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageLayerData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.MediaType) + copy(dAtA[i:], m.MediaType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MediaType))) + i-- + dAtA[i] = 0x12 + if m.LayerSize != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LayerSize)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ImageList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + 
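
Every generated marshaler in this hunk follows the same gogo/protobuf pattern: Size() precomputes the encoded length, MarshalToSizedBuffer then fills the buffer back to front, writing each field's payload first and only afterwards its length varint and wire tag (0xa is field 1 with wire type 2, 0x12 field 2, 0x1a field 3, and so on), with higher-numbered fields emitted before lower ones. The following is a minimal, self-contained sketch of that pattern under stated assumptions — tinyMessage, sovVarint, and encodeVarint are hypothetical names invented for illustration, not types or helpers from this file — showing why the reverse-order writes and the i-- / dAtA[i] = tag sequences above look the way they do:

    package main

    import (
    	"fmt"
    	"math/bits"
    )

    // sovVarint returns the number of bytes needed to encode x as a
    // protobuf varint; same formula as the generated sovGenerated helper.
    func sovVarint(x uint64) int {
    	return (bits.Len64(x|1) + 6) / 7
    }

    // encodeVarint writes v immediately before offset in dAtA and returns
    // the new offset, mirroring the backward-writing encodeVarintGenerated.
    func encodeVarint(dAtA []byte, offset int, v uint64) int {
    	offset -= sovVarint(v)
    	base := offset
    	for v >= 1<<7 {
    		dAtA[offset] = uint8(v&0x7f | 0x80)
    		v >>= 7
    		offset++
    	}
    	dAtA[offset] = uint8(v)
    	return base
    }

    // tinyMessage is a hypothetical two-field message (field 1: string,
    // field 2: uint64) used only to illustrate the reverse-marshal pattern.
    type tinyMessage struct {
    	Name  string
    	Count uint64
    }

    // size precomputes the wire size, like the generated Size() methods:
    // one tag byte plus (for strings) a length varint plus the payload.
    func (m *tinyMessage) size() int {
    	n := 1 + len(m.Name) + sovVarint(uint64(len(m.Name)))
    	n += 1 + sovVarint(m.Count)
    	return n
    }

    // marshal fills the buffer from the end, highest field number first,
    // exactly as the generated MarshalToSizedBuffer functions do.
    func (m *tinyMessage) marshal() []byte {
    	dAtA := make([]byte, m.size())
    	i := len(dAtA)
    	i = encodeVarint(dAtA, i, m.Count)
    	i--
    	dAtA[i] = 0x10 // field 2, wire type 0 (varint)
    	i -= len(m.Name)
    	copy(dAtA[i:], m.Name)
    	i = encodeVarint(dAtA, i, uint64(len(m.Name)))
    	i--
    	dAtA[i] = 0xa // field 1, wire type 2 (length-delimited)
    	return dAtA[i:]
    }

    func main() {
    	m := &tinyMessage{Name: "layer", Count: 3}
    	fmt.Printf("%% x\n", m.marshal()) // 0a 05 6c 61 79 65 72 10 03
    }

Writing backwards is a deliberate design choice in this generator: because Size() already knows the exact encoded length, the buffer is allocated once and never grown or shifted, and nested messages can be length-prefixed without a second pass.
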
+func (m *ImageList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageLookupPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageLookupPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageLookupPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.Local { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + return len(dAtA) - i, nil +} + +func (m *ImageManifest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageManifest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageManifest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Variant) + copy(dAtA[i:], m.Variant) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Variant))) + i-- + dAtA[i] = 0x32 + i -= len(m.OS) + copy(dAtA[i:], m.OS) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OS))) + i-- + dAtA[i] = 0x2a + i -= len(m.Architecture) + copy(dAtA[i:], m.Architecture) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Architecture))) + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.ManifestSize)) + i-- + dAtA[i] = 0x18 + i -= len(m.MediaType) + copy(dAtA[i:], m.MediaType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MediaType))) + i-- + dAtA[i] = 0x12 + i -= len(m.Digest) + copy(dAtA[i:], m.Digest) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Digest))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageSignature) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageSignature) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageSignature) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.IssuedTo != nil { + { + size, err := m.IssuedTo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.IssuedBy != nil { + { + size, err := m.IssuedBy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.Created != nil { + { + size, err := m.Created.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if len(m.SignedClaims) > 0 { + keysForSignedClaims := make([]string, 0, len(m.SignedClaims)) + for k := range m.SignedClaims { + keysForSignedClaims = append(keysForSignedClaims, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSignedClaims) + for iNdEx := len(keysForSignedClaims) - 1; iNdEx >= 0; iNdEx-- { + v := m.SignedClaims[string(keysForSignedClaims[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForSignedClaims[iNdEx]) + copy(dAtA[i:], keysForSignedClaims[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSignedClaims[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + i -= len(m.ImageIdentity) + copy(dAtA[i:], m.ImageIdentity) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageIdentity))) + i-- + dAtA[i] = 0x2a + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.Content != nil { + i -= len(m.Content) + copy(dAtA[i:], m.Content) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Content))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageStream) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageStream) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageStream) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageStreamImage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageStreamImage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageStreamImage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + 
size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageStreamImport) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageStreamImport) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageStreamImport) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageStreamImportSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageStreamImportSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageStreamImportSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Images) > 0 { + for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Repository != nil { + { + size, err := m.Repository.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i-- + if m.Import { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *ImageStreamImportStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageStreamImportStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageStreamImportStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Images) > 0 { + for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Repository != nil { + { + size, err := m.Repository.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Import != nil { + { + size, err := m.Import.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + 
} + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ImageStreamLayers) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageStreamLayers) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageStreamLayers) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Images) > 0 { + keysForImages := make([]string, 0, len(m.Images)) + for k := range m.Images { + keysForImages = append(keysForImages, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForImages) + for iNdEx := len(keysForImages) - 1; iNdEx >= 0; iNdEx-- { + v := m.Images[string(keysForImages[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForImages[iNdEx]) + copy(dAtA[i:], keysForImages[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForImages[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Blobs) > 0 { + keysForBlobs := make([]string, 0, len(m.Blobs)) + for k := range m.Blobs { + keysForBlobs = append(keysForBlobs, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForBlobs) + for iNdEx := len(keysForBlobs) - 1; iNdEx >= 0; iNdEx-- { + v := m.Blobs[string(keysForBlobs[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForBlobs[iNdEx]) + copy(dAtA[i:], keysForBlobs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForBlobs[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageStreamList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageStreamList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageStreamList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageStreamMapping) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + 
} + return dAtA[:n], nil +} + +func (m *ImageStreamMapping) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageStreamMapping) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Tag) + copy(dAtA[i:], m.Tag) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tag))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageStreamSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageStreamSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageStreamSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.LookupPolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Tags) > 0 { + for iNdEx := len(m.Tags) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tags[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.DockerImageRepository) + copy(dAtA[i:], m.DockerImageRepository) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageRepository))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageStreamStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageStreamStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageStreamStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.PublicDockerImageRepository) + copy(dAtA[i:], m.PublicDockerImageRepository) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PublicDockerImageRepository))) + i-- + dAtA[i] = 0x1a + if len(m.Tags) > 0 { + for iNdEx := len(m.Tags) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tags[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.DockerImageRepository) + copy(dAtA[i:], m.DockerImageRepository) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageRepository))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageStreamTag) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageStreamTag) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageStreamTag) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.LookupPolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + { + size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) + i-- + dAtA[i] = 0x18 + if m.Tag != nil { + { + size, err := m.Tag.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageStreamTagList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageStreamTagList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageStreamTagList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageTag) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageTag) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageTag) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Image != nil { + { + size, err := m.Image.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Status != nil { + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Spec != nil { + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] 
= 0xa + return len(dAtA) - i, nil +} + +func (m *ImageTagList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageTagList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageTagList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NamedTagEventList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamedTagEventList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamedTagEventList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Tag) + copy(dAtA[i:], m.Tag) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tag))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RepositoryImportSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RepositoryImportSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RepositoryImportSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ReferencePolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + i-- + if m.IncludeManifest { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + { + size, err := m.ImportPolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RepositoryImportStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, 
err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RepositoryImportStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RepositoryImportStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AdditionalTags) > 0 { + for iNdEx := len(m.AdditionalTags) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdditionalTags[iNdEx]) + copy(dAtA[i:], m.AdditionalTags[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdditionalTags[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Images) > 0 { + for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SecretList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SignatureCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignatureCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignatureCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + { + size, err := m.LastProbeTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) 
- i, nil +} + +func (m *SignatureGenericEntity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignatureGenericEntity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignatureGenericEntity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.CommonName) + copy(dAtA[i:], m.CommonName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CommonName))) + i-- + dAtA[i] = 0x12 + i -= len(m.Organization) + copy(dAtA[i:], m.Organization) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Organization))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SignatureIssuer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignatureIssuer) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignatureIssuer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.SignatureGenericEntity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SignatureSubject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignatureSubject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignatureSubject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.PublicKeyID) + copy(dAtA[i:], m.PublicKeyID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PublicKeyID))) + i-- + dAtA[i] = 0x12 + { + size, err := m.SignatureGenericEntity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TagEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TagEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TagEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) + i-- + dAtA[i] = 0x20 + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0x1a + i -= len(m.DockerImageReference) + copy(dAtA[i:], m.DockerImageReference) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DockerImageReference))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Created.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TagEventCondition) Marshal() (dAtA []byte, err error) { 
+ size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TagEventCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TagEventCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) + i-- + dAtA[i] = 0x30 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TagImportPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TagImportPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TagImportPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ImportMode) + copy(dAtA[i:], m.ImportMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImportMode))) + i-- + dAtA[i] = 0x1a + i-- + if m.Scheduled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i-- + if m.Insecure { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *TagReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TagReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TagReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ReferencePolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + { + size, err := m.ImportPolicy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + if m.Generation != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Generation)) + i-- + dAtA[i] = 0x28 + } + i-- + if m.Reference { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + if m.From != nil { + { + size, err := m.From.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Annotations) > 0 { + keysForAnnotations := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keysForAnnotations = append(keysForAnnotations, 
string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.Annotations[string(keysForAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAnnotations[iNdEx]) + copy(dAtA[i:], keysForAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TagReferencePolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TagReferencePolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TagReferencePolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *DockerImageReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Registry) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Tag) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Image) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DockerImageReference) + n += 1 + l + sovGenerated(uint64(l)) + l = m.DockerImageMetadata.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DockerImageMetadataVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DockerImageManifest) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.DockerImageLayers) > 0 { + for _, e := range m.DockerImageLayers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Signatures) > 0 { + for _, e := range m.Signatures { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.DockerImageSignatures) > 0 { + for _, b := range m.DockerImageSignatures { + l = len(b) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.DockerImageManifestMediaType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DockerImageConfig) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.DockerImageManifests) > 0 { + for _, e := range m.DockerImageManifests { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageBlobReferences) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Layers) > 0 { + for _, s := range m.Layers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Config != nil { + l = len(*m.Config) + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if len(m.Manifests) > 0 { + 
for _, s := range m.Manifests { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageImportSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.To != nil { + l = m.To.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.ImportPolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = m.ReferencePolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageImportStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Image != nil { + l = m.Image.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Tag) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Manifests) > 0 { + for _, e := range m.Manifests { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageLayer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.LayerSize)) + l = len(m.MediaType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageLayerData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LayerSize != nil { + n += 1 + sovGenerated(uint64(*m.LayerSize)) + } + l = len(m.MediaType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageLookupPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} + +func (m *ImageManifest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Digest) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MediaType) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.ManifestSize)) + l = len(m.Architecture) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.OS) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Variant) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageSignature) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Content != nil { + l = len(m.Content) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ImageIdentity) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.SignedClaims) > 0 { + for k, v := range m.SignedClaims { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Created != nil { + l = m.Created.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.IssuedBy != nil { + l = m.IssuedBy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.IssuedTo != nil { + l = m.IssuedTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ImageStream) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 
+ l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamImage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Image.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamImport) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamImportSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + if m.Repository != nil { + l = m.Repository.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Images) > 0 { + for _, e := range m.Images { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageStreamImportStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Import != nil { + l = m.Import.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Repository != nil { + l = m.Repository.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Images) > 0 { + for _, e := range m.Images { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageStreamLayers) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Blobs) > 0 { + for k, v := range m.Blobs { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Images) > 0 { + for k, v := range m.Images { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ImageStreamList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageStreamMapping) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Image.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Tag) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DockerImageRepository) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Tags) > 0 { + for _, e := range m.Tags { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.LookupPolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DockerImageRepository) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Tags) > 0 { + for _, e := range m.Tags { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.PublicDockerImageRepository) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamTag) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + if m.Tag != nil { + l = m.Tag.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.Generation)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Image.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LookupPolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageStreamTagList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ImageTag) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Image != nil { + l = m.Image.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ImageTagList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NamedTagEventList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Tag) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RepositoryImportSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.ImportPolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = m.ReferencePolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RepositoryImportStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Images) > 0 { + for _, e := range m.Images { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.AdditionalTags) > 0 { + for _, s := range m.AdditionalTags { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SecretList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SignatureCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SignatureGenericEntity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Organization) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.CommonName) 
+ n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SignatureIssuer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.SignatureGenericEntity.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SignatureSubject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.SignatureGenericEntity.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PublicKeyID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TagEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Created.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DockerImageReference) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Generation)) + return n +} + +func (m *TagEventCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Generation)) + return n +} + +func (m *TagImportPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + n += 2 + l = len(m.ImportMode) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TagReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.From != nil { + l = m.From.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if m.Generation != nil { + n += 1 + sovGenerated(uint64(*m.Generation)) + } + l = m.ImportPolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.ReferencePolicy.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TagReferencePolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *DockerImageReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DockerImageReference{`, + `Registry:` + fmt.Sprintf("%v", this.Registry) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Tag:` + fmt.Sprintf("%v", this.Tag) + `,`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `}`, + }, "") + return s +} +func (this *Image) String() string { + if this == nil { + return "nil" + } + repeatedStringForDockerImageLayers := "[]ImageLayer{" + for _, f := range this.DockerImageLayers { + repeatedStringForDockerImageLayers += strings.Replace(strings.Replace(f.String(), "ImageLayer", "ImageLayer", 1), `&`, ``, 1) + "," + } + repeatedStringForDockerImageLayers += "}" + repeatedStringForSignatures := "[]ImageSignature{" + for _, f := range this.Signatures { + repeatedStringForSignatures += strings.Replace(strings.Replace(f.String(), 
"ImageSignature", "ImageSignature", 1), `&`, ``, 1) + "," + } + repeatedStringForSignatures += "}" + repeatedStringForDockerImageManifests := "[]ImageManifest{" + for _, f := range this.DockerImageManifests { + repeatedStringForDockerImageManifests += strings.Replace(strings.Replace(f.String(), "ImageManifest", "ImageManifest", 1), `&`, ``, 1) + "," + } + repeatedStringForDockerImageManifests += "}" + s := strings.Join([]string{`&Image{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DockerImageReference:` + fmt.Sprintf("%v", this.DockerImageReference) + `,`, + `DockerImageMetadata:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DockerImageMetadata), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `DockerImageMetadataVersion:` + fmt.Sprintf("%v", this.DockerImageMetadataVersion) + `,`, + `DockerImageManifest:` + fmt.Sprintf("%v", this.DockerImageManifest) + `,`, + `DockerImageLayers:` + repeatedStringForDockerImageLayers + `,`, + `Signatures:` + repeatedStringForSignatures + `,`, + `DockerImageSignatures:` + fmt.Sprintf("%v", this.DockerImageSignatures) + `,`, + `DockerImageManifestMediaType:` + fmt.Sprintf("%v", this.DockerImageManifestMediaType) + `,`, + `DockerImageConfig:` + fmt.Sprintf("%v", this.DockerImageConfig) + `,`, + `DockerImageManifests:` + repeatedStringForDockerImageManifests + `,`, + `}`, + }, "") + return s +} +func (this *ImageBlobReferences) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageBlobReferences{`, + `Layers:` + fmt.Sprintf("%v", this.Layers) + `,`, + `Config:` + valueToStringGenerated(this.Config) + `,`, + `ImageMissing:` + fmt.Sprintf("%v", this.ImageMissing) + `,`, + `Manifests:` + fmt.Sprintf("%v", this.Manifests) + `,`, + `}`, + }, "") + return s +} +func (this *ImageImportSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageImportSpec{`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `To:` + strings.Replace(fmt.Sprintf("%v", this.To), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `ImportPolicy:` + strings.Replace(strings.Replace(this.ImportPolicy.String(), "TagImportPolicy", "TagImportPolicy", 1), `&`, ``, 1) + `,`, + `IncludeManifest:` + fmt.Sprintf("%v", this.IncludeManifest) + `,`, + `ReferencePolicy:` + strings.Replace(strings.Replace(this.ReferencePolicy.String(), "TagReferencePolicy", "TagReferencePolicy", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageImportStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForManifests := "[]Image{" + for _, f := range this.Manifests { + repeatedStringForManifests += strings.Replace(strings.Replace(f.String(), "Image", "Image", 1), `&`, ``, 1) + "," + } + repeatedStringForManifests += "}" + s := strings.Join([]string{`&ImageImportStatus{`, + `Status:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "v1.Status", 1), `&`, ``, 1) + `,`, + `Image:` + strings.Replace(this.Image.String(), "Image", "Image", 1) + `,`, + `Tag:` + fmt.Sprintf("%v", this.Tag) + `,`, + `Manifests:` + repeatedStringForManifests + `,`, + `}`, + }, "") + return s +} +func (this *ImageLayer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageLayer{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + 
`LayerSize:` + fmt.Sprintf("%v", this.LayerSize) + `,`, + `MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`, + `}`, + }, "") + return s +} +func (this *ImageLayerData) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageLayerData{`, + `LayerSize:` + valueToStringGenerated(this.LayerSize) + `,`, + `MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`, + `}`, + }, "") + return s +} +func (this *ImageList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Image{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Image", "Image", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ImageList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ImageLookupPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageLookupPolicy{`, + `Local:` + fmt.Sprintf("%v", this.Local) + `,`, + `}`, + }, "") + return s +} +func (this *ImageManifest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageManifest{`, + `Digest:` + fmt.Sprintf("%v", this.Digest) + `,`, + `MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`, + `ManifestSize:` + fmt.Sprintf("%v", this.ManifestSize) + `,`, + `Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`, + `OS:` + fmt.Sprintf("%v", this.OS) + `,`, + `Variant:` + fmt.Sprintf("%v", this.Variant) + `,`, + `}`, + }, "") + return s +} +func (this *ImageSignature) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]SignatureCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "SignatureCondition", "SignatureCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + keysForSignedClaims := make([]string, 0, len(this.SignedClaims)) + for k := range this.SignedClaims { + keysForSignedClaims = append(keysForSignedClaims, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSignedClaims) + mapStringForSignedClaims := "map[string]string{" + for _, k := range keysForSignedClaims { + mapStringForSignedClaims += fmt.Sprintf("%v: %v,", k, this.SignedClaims[k]) + } + mapStringForSignedClaims += "}" + s := strings.Join([]string{`&ImageSignature{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Content:` + valueToStringGenerated(this.Content) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `ImageIdentity:` + fmt.Sprintf("%v", this.ImageIdentity) + `,`, + `SignedClaims:` + mapStringForSignedClaims + `,`, + `Created:` + strings.Replace(fmt.Sprintf("%v", this.Created), "Time", "v1.Time", 1) + `,`, + `IssuedBy:` + strings.Replace(this.IssuedBy.String(), "SignatureIssuer", "SignatureIssuer", 1) + `,`, + `IssuedTo:` + strings.Replace(this.IssuedTo.String(), "SignatureSubject", "SignatureSubject", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStream) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageStream{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 
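Editor's note: the String() methods in this hunk sort map keys (via github_com_gogo_protobuf_sortkeys.Strings) before printing because Go randomizes map iteration order; without the sort, two renderings of the same object could differ between runs, breaking log comparison and golden-file tests. A standalone equivalent using only the standard library (sort.Strings does the same work for string keys); all names below are ours:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// stableMapString renders a map with its keys in sorted order so the
// output is identical on every call.
func stableMapString(m map[string]string) string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys) // deterministic order
	var b strings.Builder
	b.WriteString("map[string]string{")
	for _, k := range keys {
		fmt.Fprintf(&b, "%v: %v,", k, m[k])
	}
	b.WriteString("}")
	return b.String()
}

func main() {
	fmt.Println(stableMapString(map[string]string{"b": "2", "a": "1"}))
	// map[string]string{a: 1,b: 2,}
}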
1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ImageStreamSpec", "ImageStreamSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ImageStreamStatus", "ImageStreamStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamImage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageStreamImage{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamImport) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageStreamImport{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ImageStreamImportSpec", "ImageStreamImportSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ImageStreamImportStatus", "ImageStreamImportStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamImportSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForImages := "[]ImageImportSpec{" + for _, f := range this.Images { + repeatedStringForImages += strings.Replace(strings.Replace(f.String(), "ImageImportSpec", "ImageImportSpec", 1), `&`, ``, 1) + "," + } + repeatedStringForImages += "}" + s := strings.Join([]string{`&ImageStreamImportSpec{`, + `Import:` + fmt.Sprintf("%v", this.Import) + `,`, + `Repository:` + strings.Replace(this.Repository.String(), "RepositoryImportSpec", "RepositoryImportSpec", 1) + `,`, + `Images:` + repeatedStringForImages + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamImportStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForImages := "[]ImageImportStatus{" + for _, f := range this.Images { + repeatedStringForImages += strings.Replace(strings.Replace(f.String(), "ImageImportStatus", "ImageImportStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForImages += "}" + s := strings.Join([]string{`&ImageStreamImportStatus{`, + `Import:` + strings.Replace(this.Import.String(), "ImageStream", "ImageStream", 1) + `,`, + `Repository:` + strings.Replace(this.Repository.String(), "RepositoryImportStatus", "RepositoryImportStatus", 1) + `,`, + `Images:` + repeatedStringForImages + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamLayers) String() string { + if this == nil { + return "nil" + } + keysForBlobs := make([]string, 0, len(this.Blobs)) + for k := range this.Blobs { + keysForBlobs = append(keysForBlobs, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForBlobs) + mapStringForBlobs := "map[string]ImageLayerData{" + for _, k := range keysForBlobs { + mapStringForBlobs += fmt.Sprintf("%v: %v,", k, this.Blobs[k]) + } + mapStringForBlobs += "}" + keysForImages := make([]string, 0, len(this.Images)) + for k := range this.Images { + keysForImages = append(keysForImages, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForImages) + mapStringForImages := "map[string]ImageBlobReferences{" + for _, k := range keysForImages { + mapStringForImages += fmt.Sprintf("%v: %v,", k, this.Images[k]) + } + mapStringForImages += "}" + s := strings.Join([]string{`&ImageStreamLayers{`, + 
`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Blobs:` + mapStringForBlobs + `,`, + `Images:` + mapStringForImages + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ImageStream{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ImageStream", "ImageStream", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ImageStreamList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamMapping) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageStreamMapping{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`, + `Tag:` + fmt.Sprintf("%v", this.Tag) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForTags := "[]TagReference{" + for _, f := range this.Tags { + repeatedStringForTags += strings.Replace(strings.Replace(f.String(), "TagReference", "TagReference", 1), `&`, ``, 1) + "," + } + repeatedStringForTags += "}" + s := strings.Join([]string{`&ImageStreamSpec{`, + `DockerImageRepository:` + fmt.Sprintf("%v", this.DockerImageRepository) + `,`, + `Tags:` + repeatedStringForTags + `,`, + `LookupPolicy:` + strings.Replace(strings.Replace(this.LookupPolicy.String(), "ImageLookupPolicy", "ImageLookupPolicy", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForTags := "[]NamedTagEventList{" + for _, f := range this.Tags { + repeatedStringForTags += strings.Replace(strings.Replace(f.String(), "NamedTagEventList", "NamedTagEventList", 1), `&`, ``, 1) + "," + } + repeatedStringForTags += "}" + s := strings.Join([]string{`&ImageStreamStatus{`, + `DockerImageRepository:` + fmt.Sprintf("%v", this.DockerImageRepository) + `,`, + `Tags:` + repeatedStringForTags + `,`, + `PublicDockerImageRepository:` + fmt.Sprintf("%v", this.PublicDockerImageRepository) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamTag) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]TagEventCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "TagEventCondition", "TagEventCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ImageStreamTag{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Tag:` + strings.Replace(this.Tag.String(), "TagReference", "TagReference", 1) + `,`, + `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`, + `LookupPolicy:` + strings.Replace(strings.Replace(this.LookupPolicy.String(), 
"ImageLookupPolicy", "ImageLookupPolicy", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageStreamTagList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ImageStreamTag{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ImageStreamTag", "ImageStreamTag", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ImageStreamTagList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ImageTag) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageTag{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(this.Spec.String(), "TagReference", "TagReference", 1) + `,`, + `Status:` + strings.Replace(this.Status.String(), "NamedTagEventList", "NamedTagEventList", 1) + `,`, + `Image:` + strings.Replace(this.Image.String(), "Image", "Image", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageTagList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ImageTag{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ImageTag", "ImageTag", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ImageTagList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *NamedTagEventList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]TagEvent{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "TagEvent", "TagEvent", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + repeatedStringForConditions := "[]TagEventCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "TagEventCondition", "TagEventCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&NamedTagEventList{`, + `Tag:` + fmt.Sprintf("%v", this.Tag) + `,`, + `Items:` + repeatedStringForItems + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *RepositoryImportSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RepositoryImportSpec{`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `ImportPolicy:` + strings.Replace(strings.Replace(this.ImportPolicy.String(), "TagImportPolicy", "TagImportPolicy", 1), `&`, ``, 1) + `,`, + `IncludeManifest:` + fmt.Sprintf("%v", this.IncludeManifest) + `,`, + `ReferencePolicy:` + strings.Replace(strings.Replace(this.ReferencePolicy.String(), "TagReferencePolicy", "TagReferencePolicy", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RepositoryImportStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForImages := "[]ImageImportStatus{" + for _, f := range this.Images { + repeatedStringForImages += 
strings.Replace(strings.Replace(f.String(), "ImageImportStatus", "ImageImportStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForImages += "}" + s := strings.Join([]string{`&RepositoryImportStatus{`, + `Status:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "v1.Status", 1), `&`, ``, 1) + `,`, + `Images:` + repeatedStringForImages + `,`, + `AdditionalTags:` + fmt.Sprintf("%v", this.AdditionalTags) + `,`, + `}`, + }, "") + return s +} +func (this *SecretList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Secret{" + for _, f := range this.Items { + repeatedStringForItems += fmt.Sprintf("%v", f) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&SecretList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *SignatureCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SignatureCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastProbeTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *SignatureGenericEntity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SignatureGenericEntity{`, + `Organization:` + fmt.Sprintf("%v", this.Organization) + `,`, + `CommonName:` + fmt.Sprintf("%v", this.CommonName) + `,`, + `}`, + }, "") + return s +} +func (this *SignatureIssuer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SignatureIssuer{`, + `SignatureGenericEntity:` + strings.Replace(strings.Replace(this.SignatureGenericEntity.String(), "SignatureGenericEntity", "SignatureGenericEntity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SignatureSubject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SignatureSubject{`, + `SignatureGenericEntity:` + strings.Replace(strings.Replace(this.SignatureGenericEntity.String(), "SignatureGenericEntity", "SignatureGenericEntity", 1), `&`, ``, 1) + `,`, + `PublicKeyID:` + fmt.Sprintf("%v", this.PublicKeyID) + `,`, + `}`, + }, "") + return s +} +func (this *TagEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TagEvent{`, + `Created:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Created), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `DockerImageReference:` + fmt.Sprintf("%v", this.DockerImageReference) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, + `}`, + }, "") + return s +} +func (this *TagEventCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TagEventCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", 
this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, + `}`, + }, "") + return s +} +func (this *TagImportPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TagImportPolicy{`, + `Insecure:` + fmt.Sprintf("%v", this.Insecure) + `,`, + `Scheduled:` + fmt.Sprintf("%v", this.Scheduled) + `,`, + `ImportMode:` + fmt.Sprintf("%v", this.ImportMode) + `,`, + `}`, + }, "") + return s +} +func (this *TagReference) String() string { + if this == nil { + return "nil" + } + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&TagReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `From:` + strings.Replace(fmt.Sprintf("%v", this.From), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `Reference:` + fmt.Sprintf("%v", this.Reference) + `,`, + `Generation:` + valueToStringGenerated(this.Generation) + `,`, + `ImportPolicy:` + strings.Replace(strings.Replace(this.ImportPolicy.String(), "TagImportPolicy", "TagImportPolicy", 1), `&`, ``, 1) + `,`, + `ReferencePolicy:` + strings.Replace(strings.Replace(this.ReferencePolicy.String(), "TagReferencePolicy", "TagReferencePolicy", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *TagReferencePolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TagReferencePolicy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *DockerImageReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DockerImageReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DockerImageReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Registry", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Registry = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 
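Editor's note: sovGenerated and sozGenerated, defined just above, are the size-only counterparts of protobuf's varint and zigzag encodings. (bits.Len64(x|1)+6)/7 rounds the value's bit length up to whole 7-bit groups (the |1 makes zero still cost one byte), and the zigzag transform (x<<1)^(x>>63) maps small negative signed values to small unsigned ones before sizing. A runnable check of both, assuming only those two definitions; the package layout is ours:

package main

import (
	"fmt"
	"math/bits"
)

func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

func soz(x uint64) int { return sov((x << 1) ^ uint64(int64(x)>>63)) }

func main() {
	fmt.Println(sov(0))          // 1: zero still takes one byte
	fmt.Println(sov(127))        // 1: largest single-byte varint
	fmt.Println(sov(128))        // 2: needs a continuation byte
	fmt.Println(soz(^uint64(0))) // 1: -1 zigzags to 1, not ten bytes
}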
{ + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tag = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Image) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
Image: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Image: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageReference", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageReference = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DockerImageMetadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageMetadataVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageMetadataVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageManifest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
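Editor's note: every Unmarshal in this file follows the same hand-rolled loop: read a varint key, split it into field number (wire >> 3) and wire type (wire & 0x7), then branch per field, with explicit bounds checks before every slice. A stripped-down sketch of the wire-type-2 (length-delimited) path used for all the string fields above; error handling is condensed (the 64-bit shift overflow guard is omitted) and the package and function names are ours:

package wiresketch

import "io"

// decodeLengthDelimited reads the length varint at data[i] and returns
// the payload plus the offset just past it.
func decodeLengthDelimited(data []byte, i int) (string, int, error) {
	var n uint64
	for shift := uint(0); ; shift += 7 {
		if i >= len(data) {
			return "", 0, io.ErrUnexpectedEOF
		}
		b := data[i]
		i++
		n |= uint64(b&0x7F) << shift // accumulate 7 bits per byte
		if b < 0x80 {                // high bit clear: last varint byte
			break
		}
	}
	end := i + int(n)
	if end < i || end > len(data) { // overflow and bounds checks
		return "", 0, io.ErrUnexpectedEOF
	}
	return string(data[i:end]), end, nil
}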
return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageManifest = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageLayers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageLayers = append(m.DockerImageLayers, ImageLayer{}) + if err := m.DockerImageLayers[len(m.DockerImageLayers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signatures", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signatures = append(m.Signatures, ImageSignature{}) + if err := m.Signatures[len(m.Signatures)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageSignatures", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageSignatures = append(m.DockerImageSignatures, make([]byte, postIndex-iNdEx)) + copy(m.DockerImageSignatures[len(m.DockerImageSignatures)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageManifestMediaType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageManifestMediaType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageConfig", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageConfig = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageManifests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageManifests = append(m.DockerImageManifests, ImageManifest{}) + if err := m.DockerImageManifests[len(m.DockerImageManifests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageBlobReferences) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageBlobReferences: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageBlobReferences: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Layers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Layers = append(m.Layers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > 
l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Config = &s + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageMissing", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ImageMissing = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Manifests", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Manifests = append(m.Manifests, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageImportSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageImportSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageImportSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.To == nil { + m.To = &v11.LocalObjectReference{} + 
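Editor's note: the ImageMissing case above shows how booleans travel, as wire-type-0 varints, so the generated decoder reads an integer and compares against zero (bool(v != 0)) rather than requiring the byte to be exactly 1. On the encode side a bool field is always two bytes, which is where the `n += 2` lines in the Size() methods come from. A small sketch, assuming a field number of 15 or less; the helper name is ours:

// appendBoolField encodes one bool field as key byte + 0/1 varint.
func appendBoolField(buf []byte, fieldNum int, v bool) []byte {
	key := byte(fieldNum<<3 | 0) // wire type 0 (varint)
	val := byte(0)
	if v {
		val = 1
	}
	return append(buf, key, val) // always exactly 2 bytes
}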
} + if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImportPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ImportPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeManifest", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeManifest = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReferencePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ReferencePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageImportStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageImportStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageImportStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); 
err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Image == nil { + m.Image = &Image{} + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tag = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Manifests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Manifests = append(m.Manifests, Image{}) + if err := m.Manifests[len(m.Manifests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageLayer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageLayer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageLayer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LayerSize", wireType) + } + m.LayerSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LayerSize |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MediaType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageLayerData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageLayerData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageLayerData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LayerSize", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LayerSize = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MediaType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + 
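Editor's note: compare ImageLayer and ImageLayerData just above. ImageLayer decodes LayerSize straight into a plain int64, while ImageLayerData decodes into a local variable and stores its address (m.LayerSize = &v); the *int64 lets callers tell "field absent" (nil) apart from "field present with value 0", which a plain int64 cannot express. A sketch of the consuming side; the name is ours:

// layerSizeOrDefault is why the pointer matters: nil means the size
// was never sent, while a non-nil zero is a real, known size of 0.
func layerSizeOrDefault(size *int64, def int64) int64 {
	if size == nil {
		return def
	}
	return *size
}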
default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Image{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageLookupPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageLookupPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageLookupPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Local", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Local = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageManifest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageManifest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageManifest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MediaType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ManifestSize", wireType) + } + m.ManifestSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ManifestSize |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Architecture = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OS", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OS = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Variant", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Variant = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageSignature) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageSignature: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageSignature: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Content", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Content = append(m.Content[:0], dAtA[iNdEx:postIndex]...) + if m.Content == nil { + m.Content = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, SignatureCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageIdentity", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageIdentity = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignedClaims", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SignedClaims == nil { + m.SignedClaims = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.SignedClaims[mapkey] = mapvalue + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Created == nil { + m.Created = &v1.Time{} + } + if err := m.Created.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IssuedBy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IssuedBy == nil { + m.IssuedBy = &SignatureIssuer{} + } + if err := m.IssuedBy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IssuedTo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IssuedTo == nil { + m.IssuedTo = &SignatureSubject{} + } + if err := m.IssuedTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStream) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStream: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStream: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamImage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamImage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamImage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamImport) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamImport: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamImport: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 
2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamImportSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamImportSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamImportSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Import", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Import = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType) + 
} + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Repository == nil { + m.Repository = &RepositoryImportSpec{} + } + if err := m.Repository.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Images = append(m.Images, ImageImportSpec{}) + if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamImportStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamImportStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamImportStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Import", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Import == nil { + m.Import = &ImageStream{} + } + if err := m.Import.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Repository == nil { + m.Repository = &RepositoryImportStatus{} + } + if err := m.Repository.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Images = append(m.Images, ImageImportStatus{}) + if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamLayers) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamLayers: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamLayers: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Blobs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Blobs == nil { + m.Blobs = make(map[string]ImageLayerData) + } + var mapkey string + mapvalue := &ImageLayerData{} + for iNdEx < 
postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ImageLayerData{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Blobs[mapkey] = *mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Images == nil { + m.Images = make(map[string]ImageBlobReferences) + } + var mapkey string + mapvalue := &ImageBlobReferences{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 
0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ImageBlobReferences{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Images[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ImageStream{}) + if err := 
m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamMapping) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamMapping: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamMapping: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tag = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*ImageStreamSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageRepository", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageRepository = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tags = append(m.Tags, TagReference{}) + if err := m.Tags[len(m.Tags)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LookupPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LookupPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageRepository", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageRepository = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tags = append(m.Tags, NamedTagEventList{}) + if err := m.Tags[len(m.Tags)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PublicDockerImageRepository", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PublicDockerImageRepository = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamTag) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamTag: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamTag: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { 
+ return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Tag == nil { + m.Tag = &TagReference{} + } + if err := m.Tag.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + m.Generation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Generation |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, TagEventCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LookupPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LookupPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageStreamTagList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageStreamTagList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageStreamTagList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ImageStreamTag{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageTag) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageTag: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageTag: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &TagReference{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &NamedTagEventList{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Image == nil { + m.Image = &Image{} + } + if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageTagList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l 
{ + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageTagList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageTagList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ImageTag{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedTagEventList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedTagEventList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedTagEventList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tag", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 
0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tag = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, TagEvent{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, TagEventCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RepositoryImportSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RepositoryImportSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RepositoryImportSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImportPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ImportPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeManifest", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeManifest = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReferencePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ReferencePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RepositoryImportStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RepositoryImportStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RepositoryImportStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + 
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Images = append(m.Images, ImageImportStatus{}) + if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalTags", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdditionalTags = append(m.AdditionalTags, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, 
v11.Secret{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignatureCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignatureCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignatureCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = SignatureConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastProbeTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignatureGenericEntity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignatureGenericEntity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignatureGenericEntity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Organization", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Organization = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CommonName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CommonName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignatureIssuer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignatureIssuer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignatureIssuer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignatureGenericEntity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SignatureGenericEntity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignatureSubject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignatureSubject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignatureSubject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 
2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignatureGenericEntity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SignatureGenericEntity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PublicKeyID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PublicKeyID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TagEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TagEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TagEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Created.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DockerImageReference", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DockerImageReference = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + m.Generation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Generation |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TagEventCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TagEventCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TagEventCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = TagEventConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + 
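+	// (Editor's note, not upstream code:) every tag and length in this file is
+	// read with the loop pattern above: a protobuf base-128 varint, where each
+	// byte contributes its low 7 bits and a set high bit means "more bytes
+	// follow". A standalone sketch of the same decode, using a hypothetical
+	// helper name (the generated code additionally rejects varints longer than
+	// 64 bits via ErrIntOverflowGenerated):
+	//
+	//	func decodeVarint(b []byte) (v uint64, n int) {
+	//		for shift := uint(0); n < len(b); shift += 7 {
+	//			c := b[n]
+	//			n++
+	//			v |= uint64(c&0x7F) << shift
+	//			if c < 0x80 {
+	//				return v, n // decoded value and bytes consumed
+	//			}
+	//		}
+	//		return 0, 0 // truncated input
+	//	}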
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + m.Generation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Generation |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TagImportPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
TagImportPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TagImportPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Insecure", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Insecure = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Scheduled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Scheduled = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImportMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImportMode = ImportModeType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TagReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TagReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TagReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.From == nil { + m.From = &v11.ObjectReference{} + } + if err := m.From.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Reference = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Generation = &v + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImportPolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ImportPolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReferencePolicy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ReferencePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TagReferencePolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TagReferencePolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TagReferencePolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = TagReferencePolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/openshift/api/image/v1/generated.proto b/vendor/github.com/openshift/api/image/v1/generated.proto new file mode 100644 index 000000000..0b7ae7182 --- /dev/null +++ b/vendor/github.com/openshift/api/image/v1/generated.proto @@ -0,0 +1,746 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package github.com.openshift.api.image.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/openshift/api/image/v1"; + +// DockerImageReference points to a container image. +message DockerImageReference { + // Registry is the registry that contains the container image + optional string registry = 1; + + // Namespace is the namespace that contains the container image + optional string namespace = 2; + + // Name is the name of the container image + optional string name = 3; + + // Tag is which tag of the container image is being referenced + optional string tag = 4; + + // ID is the identifier for the container image + optional string iD = 5; +} + +// Image is an immutable representation of a container image and metadata at a point in time. 
+// Images are named by taking a hash of their contents (metadata and content) and any change +// in format, content, or metadata results in a new name. The images resource is primarily +// for use by cluster administrators and integrations like the cluster image registry - end +// users instead access images via the imagestreamtags or imagestreamimages resources. While +// image metadata is stored in the API, any integration that implements the container image +// registry API must provide its own storage for the raw manifest data, image config, and +// layer contents. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message Image { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // DockerImageReference is the string that can be used to pull this image. + optional string dockerImageReference = 2; + + // DockerImageMetadata contains metadata about this image + // +patchStrategy=replace + // +kubebuilder:pruning:PreserveUnknownFields + optional k8s.io.apimachinery.pkg.runtime.RawExtension dockerImageMetadata = 3; + + // DockerImageMetadataVersion conveys the version of the object, which if empty defaults to "1.0" + optional string dockerImageMetadataVersion = 4; + + // DockerImageManifest is the raw JSON of the manifest + optional string dockerImageManifest = 5; + + // DockerImageLayers represents the layers in the image. May not be set if the image does not define that data or if the image represents a manifest list. + repeated ImageLayer dockerImageLayers = 6; + + // Signatures holds all signatures of the image. + // +patchMergeKey=name + // +patchStrategy=merge + repeated ImageSignature signatures = 7; + + // DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1. + repeated bytes dockerImageSignatures = 8; + + // DockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2. + optional string dockerImageManifestMediaType = 9; + + // DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. + // Will not be set when the image represents a manifest list. + optional string dockerImageConfig = 10; + + // DockerImageManifests holds information about sub-manifests when the image represents a manifest list. + // When this field is present, no DockerImageLayers should be specified. + repeated ImageManifest dockerImageManifests = 11; +} + +// ImageBlobReferences describes the blob references within an image. +message ImageBlobReferences { + // imageMissing is true if the image is referenced by the image stream but the image + // object has been deleted from the API by an administrator. When this field is set, + // layers and config fields may be empty and callers that depend on the image metadata + // should consider the image to be unavailable for download or viewing. + // +optional + optional bool imageMissing = 3; + + // layers is the list of blobs that compose this image, from base layer to top layer. + // All layers referenced by this array will be defined in the blobs map. Some images + // may have zero layers. + // +optional + repeated string layers = 1; + + // config, if set, is the blob that contains the image config. 
Some images do + // not have separate config blobs and this field will be set to nil if so. + // +optional + optional string config = 2; + + // manifests is the list of other image names that this image points + // to. For a single architecture image, it is empty. For a multi-arch + // image, it consists of the digests of single architecture images, + // such images shouldn't have layers nor config. + // +optional + repeated string manifests = 4; +} + +// ImageImportSpec describes a request to import a specific image. +message ImageImportSpec { + // From is the source of an image to import; only kind DockerImage is allowed + optional k8s.io.api.core.v1.ObjectReference from = 1; + + // To is a tag in the current image stream to assign the imported image to, if name is not specified the default tag from from.name will be used + optional k8s.io.api.core.v1.LocalObjectReference to = 2; + + // ImportPolicy is the policy controlling how the image is imported + optional TagImportPolicy importPolicy = 3; + + // ReferencePolicy defines how other components should consume the image + optional TagReferencePolicy referencePolicy = 5; + + // IncludeManifest determines if the manifest for each image is returned in the response + optional bool includeManifest = 4; +} + +// ImageImportStatus describes the result of an image import. +message ImageImportStatus { + // Status is the status of the image import, including errors encountered while retrieving the image + optional k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 1; + + // Image is the metadata of that image, if the image was located + optional Image image = 2; + + // Tag is the tag this image was located under, if any + optional string tag = 3; + + // Manifests holds sub-manifests metadata when importing a manifest list + repeated Image manifests = 4; +} + +// ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none. +message ImageLayer { + // Name of the layer as defined by the underlying store. + optional string name = 1; + + // Size of the layer in bytes as defined by the underlying store. + optional int64 size = 2; + + // MediaType of the referenced object. + optional string mediaType = 3; +} + +// ImageLayerData contains metadata about an image layer. +message ImageLayerData { + // Size of the layer in bytes as defined by the underlying store. This field is + // optional if the necessary information about size is not available. + optional int64 size = 1; + + // MediaType of the referenced object. + optional string mediaType = 2; +} + +// ImageList is a list of Image objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ImageList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of images + repeated Image items = 2; +} + +// ImageLookupPolicy describes how an image stream can be used to override the image references +// used by pods, builds, and other resources in a namespace. +message ImageLookupPolicy { + // local will change the docker short image references (like "mysql" or + // "php:latest") on objects in this namespace to the image ID whenever they match + // this image stream, instead of reaching out to a remote registry. 
The name will + be fully qualified to an image ID if found. The tag's referencePolicy is taken + into account on the replaced value. Only works within the current namespace. + optional bool local = 3; +} + +// ImageManifest represents sub-manifests of a manifest list. The Digest field points to a regular +// Image object. +message ImageManifest { + // Digest is the unique identifier for the manifest. It refers to an Image object. + optional string digest = 1; + + // MediaType defines the type of the manifest, possible values are application/vnd.oci.image.manifest.v1+json, + // application/vnd.docker.distribution.manifest.v2+json or application/vnd.docker.distribution.manifest.v1+json. + optional string mediaType = 2; + + // ManifestSize represents the size of the raw object contents, in bytes. + optional int64 manifestSize = 3; + + // Architecture specifies the supported CPU architecture, for example `amd64` or `ppc64le`. + optional string architecture = 4; + + // OS specifies the operating system, for example `linux`. + optional string os = 5; + + // Variant is an optional field representing a variant of the CPU, for example v6 to specify a particular CPU + // variant of the ARM CPU. + optional string variant = 6; +} + +// ImageSignature holds a signature of an image. It allows to verify image identity and possibly other claims +// as long as the signature is trusted. Based on this information it is possible to restrict runnable images +// to those matching cluster-wide policy. +// Mandatory fields should be parsed by clients doing image verification. The others are parsed from +// signature's content by the server. They serve just an informative purpose. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ImageSignature { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Required: Describes a type of stored blob. + optional string type = 2; + + // Required: An opaque binary string which is an image's signature. + optional bytes content = 3; + + // Conditions represent the latest available observations of a signature's current state. + // +patchMergeKey=type + // +patchStrategy=merge + repeated SignatureCondition conditions = 4; + + // A human readable string representing image's identity. It could be a product name and version, or an + // image pull spec (e.g. "registry.access.redhat.com/rhel7/rhel:7.2"). + optional string imageIdentity = 5; + + // Contains claims from the signature. + map<string, string> signedClaims = 6; + + // If specified, it is the time of signature's creation. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time created = 7; + + // If specified, it holds information about an issuer of signing certificate or key (a person or entity + // who signed the signing certificate or key). + optional SignatureIssuer issuedBy = 8; + + // If specified, it holds information about a subject of signing certificate or key (a person or entity + // who signed the image). + optional SignatureSubject issuedTo = 9; +} + +// An ImageStream stores a mapping of tags to images, metadata overrides that are applied +// when images are tagged in a stream, and an optional reference to a container image +// repository on a registry. 
Users typically update the spec.tags field to point to external +// images which are imported from container registries using credentials in your namespace +// with the pull secret type, or to existing image stream tags and images which are +// immediately accessible for tagging or pulling. The history of images applied to a tag +// is visible in the status.tags field and any user who can view an image stream is allowed +// to tag that image into their own image streams. Access to pull images from the integrated +// registry is granted by having the "get imagestreams/layers" permission on a given image +// stream. Users may remove a tag by deleting the imagestreamtag resource, which causes both +// spec and status for that tag to be removed. Image stream history is retained until an +// administrator runs the prune operation, which removes references that are no longer in +// use. To preserve a historical image, ensure there is a tag in spec pointing to that image +// by its digest. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ImageStream { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec describes the desired state of this stream + // +optional + optional ImageStreamSpec spec = 2; + + // Status describes the current state of this stream + // +optional + optional ImageStreamStatus status = 3; +} + +// ImageStreamImage represents an Image that is retrieved by image name from an ImageStream. +// User interfaces and regular users can use this resource to access the metadata details of +// a tagged image in the image stream history for viewing, since Image resources are not +// directly accessible to end users. A not found error will be returned if no such image is +// referenced by a tag within the ImageStream. Images are created when spec tags are set on +// an image stream that represent an image in an external registry, when pushing to the +// integrated registry, or when tagging an existing image from one image stream to another. +// The name of an image stream image is in the form "<STREAM>@<DIGEST>", where the digest is +// the content addressable identifier for the image (sha256:xxxxx...). You can use +// ImageStreamImages as the from.kind of an image stream spec tag to reference an image +// exactly. The only operations supported on the imagestreamimage endpoint are retrieving +// the image. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ImageStreamImage { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Image associated with the ImageStream and image name. + optional Image image = 2; +} + +// The image stream import resource provides an easy way for a user to find and import container images +// from other container image registries into the server. Individual images or an entire image repository may +// be imported, and users may choose to see the results of the import prior to tagging the resulting +// images into the specified image stream. 
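+// (Editor's note, not part of the upstream documentation:) a minimal sketch of
+// driving this resource from Go, assuming the generated clientset from
+// github.com/openshift/client-go and hypothetical names ("demo", "myapp"):
+//
+//	isi := &imagev1.ImageStreamImport{
+//		ObjectMeta: metav1.ObjectMeta{Name: "myapp", Namespace: "demo"},
+//		Spec: imagev1.ImageStreamImportSpec{
+//			// Import=false only previews the result without updating the stream.
+//			Import: true,
+//			Images: []imagev1.ImageImportSpec{{
+//				From: corev1.ObjectReference{Kind: "DockerImage", Name: "quay.io/example/myapp:latest"},
+//				To:   &corev1.LocalObjectReference{Name: "latest"},
+//			}},
+//		},
+//	}
+//	result, err := imageClient.ImageV1().ImageStreamImports("demo").Create(ctx, isi, metav1.CreateOptions{})
+//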
+//
+// This API is intended for end-user tools that need to see the metadata of the image prior to import
+// (for instance, to generate an application from it). Clients that know the desired image can continue
+// to create spec.tags directly into their image streams.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ImageStreamImport {
+  // metadata is the standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec is a description of the images that the user wishes to import
+  optional ImageStreamImportSpec spec = 2;
+
+  // Status is the result of importing the image
+  optional ImageStreamImportStatus status = 3;
+}
+
+// ImageStreamImportSpec defines what images should be imported.
+message ImageStreamImportSpec {
+  // Import indicates whether to perform an import - if so, the specified tags are set on the spec
+  // and status of the image stream defined by the type meta.
+  optional bool import = 1;
+
+  // Repository is an optional import of an entire container image repository. A maximum limit on the
+  // number of tags imported this way is imposed by the server.
+  optional RepositoryImportSpec repository = 2;
+
+  // Images are a list of individual images to import.
+  repeated ImageImportSpec images = 3;
+}
+
+// ImageStreamImportStatus contains information about the status of an image stream import.
+message ImageStreamImportStatus {
+  // Import is the image stream that was successfully updated or created when 'to' was set.
+  optional ImageStream import = 1;
+
+  // Repository is set if spec.repository was set to the outcome of the import
+  optional RepositoryImportStatus repository = 2;
+
+  // Images is set with the result of importing spec.images
+  repeated ImageImportStatus images = 3;
+}
+
+// ImageStreamLayers describes information about the layers referenced by images in this
+// image stream.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ImageStreamLayers {
+  // metadata is the standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // blobs is a map of blob name to metadata about the blob.
+  map<string, ImageLayerData> blobs = 2;
+
+  // images is a map between an image name and the names of the blobs and config that
+  // comprise the image.
+  map<string, ImageBlobReferences> images = 3;
+}
+
+// ImageStreamList is a list of ImageStream objects.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ImageStreamList {
+  // metadata is the standard list's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of imageStreams
+  repeated ImageStream items = 2;
+}
+
+// ImageStreamMapping represents a mapping from a single image stream tag to a container
+// image as well as the reference to the container image stream the image came from. This
+// resource is used by privileged integrators to create an image resource and to associate
+// it with an image stream in the status tags field. Creating an ImageStreamMapping will
+// allow any user who can view the image stream to tag or pull that image, so only create
+// mappings where the user has proven they have access to the image contents directly.
+// The only operation supported for this resource is create, and the metadata name and
+// namespace should be set to the image stream containing the tag that should be updated.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ImageStreamMapping {
+  // metadata is the standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Image is a container image.
+  optional Image image = 2;
+
+  // Tag is the string by which this image can be located within the stream.
+  optional string tag = 3;
+}
+
+// ImageStreamSpec represents options for ImageStreams.
+message ImageStreamSpec {
+  // lookupPolicy controls how other resources reference images within this namespace.
+  optional ImageLookupPolicy lookupPolicy = 3;
+
+  // dockerImageRepository is optional, if specified this stream is backed by a container repository on this server
+  // Deprecated: This field is deprecated as of v3.7 and will be removed in a future release.
+  // Specify the source for the tags to be imported in each tag via the spec.tags.from reference instead.
+  optional string dockerImageRepository = 1;
+
+  // tags map arbitrary string values to specific image locators
+  // +patchMergeKey=name
+  // +patchStrategy=merge
+  repeated TagReference tags = 2;
+}
+
+// ImageStreamStatus contains information about the state of this image stream.
+message ImageStreamStatus {
+  // DockerImageRepository represents the effective location this stream may be accessed at.
+  // May be empty until the server determines where the repository is located
+  optional string dockerImageRepository = 1;
+
+  // PublicDockerImageRepository represents the public location from where the image can
+  // be pulled outside the cluster. This field may be empty if the administrator
+  // has not exposed the integrated registry externally.
+  optional string publicDockerImageRepository = 3;
+
+  // Tags are a historical record of images associated with each tag. The first entry in the
+  // TagEvent array is the currently tagged image.
+  // +patchMergeKey=tag
+  // +patchStrategy=merge
+  repeated NamedTagEventList tags = 2;
+}
+
+// ImageStreamTag represents an Image that is retrieved by tag name from an ImageStream.
+// Use this resource to interact with the tags and images in an image stream by tag, or
+// to see the image details for a particular tag. The image associated with this resource
+// is the most recently successfully tagged, imported, or pushed image (as described in the
+// image stream status.tags.items list for this tag). If an import is in progress or has
+// failed, the previous image will be shown. Deleting an image stream tag clears both the
+// status and spec fields of an image stream. If no image can be retrieved for a given tag,
+// a not found error will be returned.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1 +message ImageStreamTag { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // tag is the spec tag associated with this image stream tag, and it may be null + // if only pushes have occurred to this image stream. + optional TagReference tag = 2; + + // generation is the current generation of the tagged image - if tag is provided + // and this value is not equal to the tag generation, a user has requested an + // import that has not completed, or conditions will be filled out indicating any + // error. + optional int64 generation = 3; + + // lookupPolicy indicates whether this tag will handle image references in this + // namespace. + optional ImageLookupPolicy lookupPolicy = 6; + + // conditions is an array of conditions that apply to the image stream tag. + repeated TagEventCondition conditions = 4; + + // image associated with the ImageStream and tag. + optional Image image = 5; +} + +// ImageStreamTagList is a list of ImageStreamTag objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ImageStreamTagList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of image stream tags + repeated ImageStreamTag items = 2; +} + +// ImageTag represents a single tag within an image stream and includes the spec, +// the status history, and the currently referenced image (if any) of the provided +// tag. This type replaces the ImageStreamTag by providing a full view of the tag. +// ImageTags are returned for every spec or status tag present on the image stream. +// If no tag exists in either form a not found error will be returned by the API. +// A create operation will succeed if no spec tag has already been defined and the +// spec field is set. Delete will remove both spec and status elements from the +// image stream. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ImageTag { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the spec tag associated with this image stream tag, and it may be null + // if only pushes have occurred to this image stream. + optional TagReference spec = 2; + + // status is the status tag details associated with this image stream tag, and it + // may be null if no push or import has been performed. + optional NamedTagEventList status = 3; + + // image is the details of the most recent image stream status tag, and it may be + // null if import has not completed or an administrator has deleted the image + // object. To verify this is the most recent image, you must verify the generation + // of the most recent status.items entry matches the spec tag (if a spec tag is + // set). This field will not be set when listing image tags. 
+  optional Image image = 4;
+}
+
+// ImageTagList is a list of ImageTag objects. When listing image tags, the image
+// field is not populated. Tags are returned in alphabetical order by image stream
+// and then tag.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ImageTagList {
+  // metadata is the standard list's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of image stream tags
+  repeated ImageTag items = 2;
+}
+
+// NamedTagEventList relates a tag to its image history.
+message NamedTagEventList {
+  // Tag is the tag for which the history is recorded
+  optional string tag = 1;
+
+  // Items is the historical record of images associated with this tag.
+  repeated TagEvent items = 2;
+
+  // Conditions is an array of conditions that apply to the tag event list.
+  repeated TagEventCondition conditions = 3;
+}
+
+// RepositoryImportSpec describes a request to import images from a container image repository.
+message RepositoryImportSpec {
+  // From is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed
+  optional k8s.io.api.core.v1.ObjectReference from = 1;
+
+  // ImportPolicy is the policy controlling how the image is imported
+  optional TagImportPolicy importPolicy = 2;
+
+  // ReferencePolicy defines how other components should consume the image
+  optional TagReferencePolicy referencePolicy = 4;
+
+  // IncludeManifest determines if the manifest for each image is returned in the response
+  optional bool includeManifest = 3;
+}
+
+// RepositoryImportStatus describes the result of an image repository import
+message RepositoryImportStatus {
+  // Status reflects whether any failure occurred during import
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Status status = 1;
+
+  // Images is a list of images successfully retrieved by the import of the repository.
+  repeated ImageImportStatus images = 2;
+
+  // AdditionalTags are tags that exist in the repository but were not imported because
+  // a maximum limit of automatic imports was applied.
+  repeated string additionalTags = 3;
+}
+
+// SecretList is a list of Secret.
+// +openshift:compatibility-gen:level=1
+message SecretList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of secret objects.
+  // More info: https://kubernetes.io/docs/concepts/configuration/secret
+  repeated k8s.io.api.core.v1.Secret items = 2;
+}
+
+// SignatureCondition describes an image signature condition of particular kind at particular probe time.
+message SignatureCondition {
+  // Type of signature condition, Complete or Failed.
+  optional string type = 1;
+
+  // Status of the condition, one of True, False, Unknown.
+  optional string status = 2;
+
+  // Last time the condition was checked.
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3;
+
+  // Last time the condition transitioned from one status to another.
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4;
+
+  // (brief) reason for the condition's last transition.
+  optional string reason = 5;
+
+  // Human readable message indicating details about last transition.
+  optional string message = 6;
+}
+
+// SignatureGenericEntity holds generic information about a person or entity who is an issuer or a subject
+// of signing certificate or key.
+message SignatureGenericEntity {
+  // Organization name.
+  optional string organization = 1;
+
+  // Common name (e.g. openshift-signing-service).
+  optional string commonName = 2;
+}
+
+// SignatureIssuer holds information about an issuer of signing certificate or key.
+message SignatureIssuer {
+  optional SignatureGenericEntity signatureGenericEntity = 1;
+}
+
+// SignatureSubject holds information about a person or entity who created the signature.
+message SignatureSubject {
+  optional SignatureGenericEntity signatureGenericEntity = 1;
+
+  // If present, it is a human readable key id of public key belonging to the subject used to verify image
+  // signature. It should contain at least 64 lowest bits of public key's fingerprint (e.g.
+  // 0x685ebe62bf278440).
+  optional string publicKeyID = 2;
+}
+
+// TagEvent is used by ImageStreamStatus to keep a historical record of images associated with a tag.
+message TagEvent {
+  // Created holds the time the TagEvent was created
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time created = 1;
+
+  // DockerImageReference is the string that can be used to pull this image
+  optional string dockerImageReference = 2;
+
+  // Image is the image
+  optional string image = 3;
+
+  // Generation is the spec tag generation that resulted in this tag being updated
+  optional int64 generation = 4;
+}
+
+// TagEventCondition contains condition information for a tag event.
+message TagEventCondition {
+  // Type of tag event condition, currently only ImportSuccess
+  optional string type = 1;
+
+  // Status of the condition, one of True, False, Unknown.
+  optional string status = 2;
+
+  // LastTransitionTime is the time the condition transitioned from one status to another.
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+  // Reason is a brief machine readable explanation for the condition's last transition.
+  optional string reason = 4;
+
+  // Message is a human readable description of the details about last transition, complementing reason.
+  optional string message = 5;
+
+  // Generation is the spec tag generation that this status corresponds to
+  optional int64 generation = 6;
+}
+
+// TagImportPolicy controls how images related to this tag will be imported.
+message TagImportPolicy {
+  // Insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import.
+  optional bool insecure = 1;
+
+  // Scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported
+  optional bool scheduled = 2;
+
+  // ImportMode describes how to import an image manifest.
+  optional string importMode = 3;
+}
+
+// TagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track.
+message TagReference {
+  // Name of the tag
+  optional string name = 1;
+
+  // Optional; if specified, annotations that are applied to images retrieved via ImageStreamTags.
+  // +optional
+  map<string, string> annotations = 2;
+
+  // Optional; if specified, a reference to another image that this tag should point to. Valid values
+  // are ImageStreamTag, ImageStreamImage, and DockerImage. ImageStreamTag references
+  // can only reference a tag within this same ImageStream.
+  optional k8s.io.api.core.v1.ObjectReference from = 3;
+
+  // Reference states if the tag will be imported. Default value is false, which means the tag will
+  // be imported.
+  optional bool reference = 4;
+
+  // Generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference
+  // is changed the generation is set to match the current stream generation (which is incremented every
+  // time spec is changed). Other processes in the system like the image importer observe that the
+  // generation of spec tag is newer than the generation recorded in the status and use that as a trigger
+  // to import the newest remote tag. To trigger a new import, clients may set this value to zero which
+  // will reset the generation to the latest stream generation. Legacy clients will send this value as
+  // nil which will be merged with the current tag generation.
+  // +optional
+  optional int64 generation = 5;
+
+  // ImportPolicy is information that controls how images may be imported by the server.
+  optional TagImportPolicy importPolicy = 6;
+
+  // ReferencePolicy defines how other components should consume the image.
+  optional TagReferencePolicy referencePolicy = 7;
+}
+
+// TagReferencePolicy describes how pull-specs for images in this image stream tag are generated when
+// image change triggers in deployment configs or builds are resolved. This allows the image stream
+// author to control how images are accessed.
+message TagReferencePolicy {
+  // Type determines how the image pull spec should be transformed when the image stream tag is used in
+  // deployment config triggers or new builds. The default value is `Source`, indicating the original
+  // location of the image should be used (if imported). The user may also specify `Local`, indicating
+  // that the pull spec should point to the integrated container image registry and leverage the registry's
+  // ability to proxy the pull to an upstream registry. `Local` allows the credentials used to pull this
+  // image to be managed from the image stream's namespace, so others on the platform can access a remote
+  // image but have no access to the remote secret. It also allows the image layers to be mirrored into
+  // the local registry, from which the images can still be pulled even if the upstream registry is unavailable.
+  optional string type = 1;
+}
+
diff --git a/vendor/github.com/openshift/api/image/v1/legacy.go b/vendor/github.com/openshift/api/image/v1/legacy.go
new file mode 100644
index 000000000..02bbaa290
--- /dev/null
+++ b/vendor/github.com/openshift/api/image/v1/legacy.go
@@ -0,0 +1,33 @@
+package v1
+
+import (
+	"github.com/openshift/api/image/docker10"
+	"github.com/openshift/api/image/dockerpre012"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+	legacyGroupVersion            = schema.GroupVersion{Group: "", Version: "v1"}
+	legacySchemeBuilder           = runtime.NewSchemeBuilder(addLegacyKnownTypes, docker10.AddToSchemeInCoreGroup, dockerpre012.AddToSchemeInCoreGroup, corev1.AddToScheme)
+	DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to api.Scheme.
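+//
+// As an editor's illustration (not part of the vendored file), the exported
+// installer above is typically used to populate a runtime.Scheme, e.g.:
+//
+//	scheme := runtime.NewScheme()
+//	if err := DeprecatedInstallWithoutGroup(scheme); err != nil {
+//		panic(err)
+//	}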
+func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &Image{}, + &ImageList{}, + &ImageSignature{}, + &ImageStream{}, + &ImageStreamList{}, + &ImageStreamMapping{}, + &ImageStreamTag{}, + &ImageStreamTagList{}, + &ImageStreamImage{}, + &ImageStreamImport{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) + return nil +} diff --git a/vendor/github.com/openshift/api/image/v1/register.go b/vendor/github.com/openshift/api/image/v1/register.go new file mode 100644 index 000000000..0d924103a --- /dev/null +++ b/vendor/github.com/openshift/api/image/v1/register.go @@ -0,0 +1,54 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/openshift/api/image/docker10" + "github.com/openshift/api/image/dockerpre012" +) + +var ( + GroupName = "image.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, docker10.AddToScheme, dockerpre012.AddToScheme, corev1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &Image{}, + &ImageList{}, + &ImageSignature{}, + &ImageStream{}, + &ImageStreamList{}, + &ImageStreamMapping{}, + &ImageStreamTag{}, + &ImageStreamTagList{}, + &ImageStreamImage{}, + &ImageStreamLayers{}, + &ImageStreamImport{}, + &ImageTag{}, + &ImageTagList{}, + &SecretList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/image/v1/types.go b/vendor/github.com/openshift/api/image/v1/types.go new file mode 100644 index 000000000..9919c0fe7 --- /dev/null +++ b/vendor/github.com/openshift/api/image/v1/types.go @@ -0,0 +1,766 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageList is a list of Image objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImageList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of images + Items []Image `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Image is an immutable representation of a container image and metadata at a point in time. 
+// Images are named by taking a hash of their contents (metadata and content) and any change +// in format, content, or metadata results in a new name. The images resource is primarily +// for use by cluster administrators and integrations like the cluster image registry - end +// users instead access images via the imagestreamtags or imagestreamimages resources. While +// image metadata is stored in the API, any integration that implements the container image +// registry API must provide its own storage for the raw manifest data, image config, and +// layer contents. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Image struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // DockerImageReference is the string that can be used to pull this image. + DockerImageReference string `json:"dockerImageReference,omitempty" protobuf:"bytes,2,opt,name=dockerImageReference"` + // DockerImageMetadata contains metadata about this image + // +patchStrategy=replace + // +kubebuilder:pruning:PreserveUnknownFields + DockerImageMetadata runtime.RawExtension `json:"dockerImageMetadata,omitempty" patchStrategy:"replace" protobuf:"bytes,3,opt,name=dockerImageMetadata"` + // DockerImageMetadataVersion conveys the version of the object, which if empty defaults to "1.0" + DockerImageMetadataVersion string `json:"dockerImageMetadataVersion,omitempty" protobuf:"bytes,4,opt,name=dockerImageMetadataVersion"` + // DockerImageManifest is the raw JSON of the manifest + DockerImageManifest string `json:"dockerImageManifest,omitempty" protobuf:"bytes,5,opt,name=dockerImageManifest"` + // DockerImageLayers represents the layers in the image. May not be set if the image does not define that data or if the image represents a manifest list. + DockerImageLayers []ImageLayer `json:"dockerImageLayers,omitempty" protobuf:"bytes,6,rep,name=dockerImageLayers"` + // Signatures holds all signatures of the image. + // +patchMergeKey=name + // +patchStrategy=merge + Signatures []ImageSignature `json:"signatures,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=signatures"` + // DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1. + DockerImageSignatures [][]byte `json:"dockerImageSignatures,omitempty" protobuf:"bytes,8,rep,name=dockerImageSignatures"` + // DockerImageManifestMediaType specifies the mediaType of manifest. This is a part of manifest schema v2. + DockerImageManifestMediaType string `json:"dockerImageManifestMediaType,omitempty" protobuf:"bytes,9,opt,name=dockerImageManifestMediaType"` + // DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. + // Will not be set when the image represents a manifest list. + DockerImageConfig string `json:"dockerImageConfig,omitempty" protobuf:"bytes,10,opt,name=dockerImageConfig"` + // DockerImageManifests holds information about sub-manifests when the image represents a manifest list. + // When this field is present, no DockerImageLayers should be specified. 
+	DockerImageManifests []ImageManifest `json:"dockerImageManifests,omitempty" protobuf:"bytes,11,rep,name=dockerImageManifests"`
+}
+
+// ImageManifest represents sub-manifests of a manifest list. The Digest field points to a regular
+// Image object.
+type ImageManifest struct {
+	// Digest is the unique identifier for the manifest. It refers to an Image object.
+	Digest string `json:"digest" protobuf:"bytes,1,opt,name=digest"`
+	// MediaType defines the type of the manifest, possible values are application/vnd.oci.image.manifest.v1+json,
+	// application/vnd.docker.distribution.manifest.v2+json or application/vnd.docker.distribution.manifest.v1+json.
+	MediaType string `json:"mediaType" protobuf:"bytes,2,opt,name=mediaType"`
+	// ManifestSize represents the size of the raw object contents, in bytes.
+	ManifestSize int64 `json:"manifestSize" protobuf:"varint,3,opt,name=manifestSize"`
+	// Architecture specifies the supported CPU architecture, for example `amd64` or `ppc64le`.
+	Architecture string `json:"architecture" protobuf:"bytes,4,opt,name=architecture"`
+	// OS specifies the operating system, for example `linux`.
+	OS string `json:"os" protobuf:"bytes,5,opt,name=os"`
+	// Variant is an optional field representing a variant of the CPU, for example v6 to specify a particular CPU
+	// variant of the ARM CPU.
+	Variant string `json:"variant,omitempty" protobuf:"bytes,6,opt,name=variant"`
+}
+
+// ImageLayer represents a single layer of the image. Some images may have multiple layers. Some may have none.
+type ImageLayer struct {
+	// Name of the layer as defined by the underlying store.
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+	// Size of the layer in bytes as defined by the underlying store.
+	LayerSize int64 `json:"size" protobuf:"varint,2,opt,name=size"`
+	// MediaType of the referenced object.
+	MediaType string `json:"mediaType" protobuf:"bytes,3,opt,name=mediaType"`
+}
+
+// +genclient
+// +genclient:onlyVerbs=create,delete
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageSignature holds a signature of an image. It allows one to verify image identity and possibly other claims
+// as long as the signature is trusted. Based on this information it is possible to restrict runnable images
+// to those matching cluster-wide policy.
+// Mandatory fields should be parsed by clients doing image verification. The others are parsed from
+// signature's content by the server. They serve a purely informative purpose.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageSignature struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Required: Describes a type of stored blob.
+	Type string `json:"type" protobuf:"bytes,2,opt,name=type"`
+	// Required: An opaque binary string which is an image's signature.
+	Content []byte `json:"content" protobuf:"bytes,3,opt,name=content"`
+	// Conditions represent the latest available observations of a signature's current state.
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []SignatureCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
+
+	// Following metadata fields will be set by server if the signature content is successfully parsed and
+	// the information is available.
+
+	// A human readable string representing image's identity. It could be a product name and version, or an
+	// image pull spec (e.g. "registry.access.redhat.com/rhel7/rhel:7.2").
+	ImageIdentity string `json:"imageIdentity,omitempty" protobuf:"bytes,5,opt,name=imageIdentity"`
+	// Contains claims from the signature.
+	SignedClaims map[string]string `json:"signedClaims,omitempty" protobuf:"bytes,6,rep,name=signedClaims"`
+	// If specified, it is the time of signature's creation.
+	Created *metav1.Time `json:"created,omitempty" protobuf:"bytes,7,opt,name=created"`
+	// If specified, it holds information about an issuer of signing certificate or key (a person or entity
+	// who signed the signing certificate or key).
+	IssuedBy *SignatureIssuer `json:"issuedBy,omitempty" protobuf:"bytes,8,opt,name=issuedBy"`
+	// If specified, it holds information about a subject of signing certificate or key (a person or entity
+	// who signed the image).
+	IssuedTo *SignatureSubject `json:"issuedTo,omitempty" protobuf:"bytes,9,opt,name=issuedTo"`
+}
+
+// SignatureConditionType is a type of image signature condition.
+type SignatureConditionType string
+
+// SignatureCondition describes an image signature condition of particular kind at particular probe time.
+type SignatureCondition struct {
+	// Type of signature condition, Complete or Failed.
+	Type SignatureConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=SignatureConditionType"`
+	// Status of the condition, one of True, False, Unknown.
+	Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+	// Last time the condition was checked.
+	LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
+	// Last time the condition transitioned from one status to another.
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
+	// (brief) reason for the condition's last transition.
+	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
+	// Human readable message indicating details about last transition.
+	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
+}
+
+// SignatureGenericEntity holds generic information about a person or entity who is an issuer or a subject
+// of signing certificate or key.
+type SignatureGenericEntity struct {
+	// Organization name.
+	Organization string `json:"organization,omitempty" protobuf:"bytes,1,opt,name=organization"`
+	// Common name (e.g. openshift-signing-service).
+	CommonName string `json:"commonName,omitempty" protobuf:"bytes,2,opt,name=commonName"`
+}
+
+// SignatureIssuer holds information about an issuer of signing certificate or key.
+type SignatureIssuer struct {
+	SignatureGenericEntity `json:",inline" protobuf:"bytes,1,opt,name=signatureGenericEntity"`
+}
+
+// SignatureSubject holds information about a person or entity who created the signature.
+type SignatureSubject struct { + SignatureGenericEntity `json:",inline" protobuf:"bytes,1,opt,name=signatureGenericEntity"` + // If present, it is a human readable key id of public key belonging to the subject used to verify image + // signature. It should contain at least 64 lowest bits of public key's fingerprint (e.g. + // 0x685ebe62bf278440). + PublicKeyID string `json:"publicKeyID" protobuf:"bytes,2,opt,name=publicKeyID"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageStreamList is a list of ImageStream objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImageStreamList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of imageStreams + Items []ImageStream `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:method=Secrets,verb=get,subresource=secrets,result=github.com/openshift/api/image/v1.SecretList +// +genclient:method=Layers,verb=get,subresource=layers,result=github.com/openshift/api/image/v1.ImageStreamLayers +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// An ImageStream stores a mapping of tags to images, metadata overrides that are applied +// when images are tagged in a stream, and an optional reference to a container image +// repository on a registry. Users typically update the spec.tags field to point to external +// images which are imported from container registries using credentials in your namespace +// with the pull secret type, or to existing image stream tags and images which are +// immediately accessible for tagging or pulling. The history of images applied to a tag +// is visible in the status.tags field and any user who can view an image stream is allowed +// to tag that image into their own image streams. Access to pull images from the integrated +// registry is granted by having the "get imagestreams/layers" permission on a given image +// stream. Users may remove a tag by deleting the imagestreamtag resource, which causes both +// spec and status for that tag to be removed. Image stream history is retained until an +// administrator runs the prune operation, which removes references that are no longer in +// use. To preserve a historical image, ensure there is a tag in spec pointing to that image +// by its digest. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImageStream struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec describes the desired state of this stream + // +optional + Spec ImageStreamSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + // Status describes the current state of this stream + // +optional + Status ImageStreamStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ImageStreamSpec represents options for ImageStreams. 
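+//
+// As an editor's sketch (the image name is a placeholder; corev1 is the
+// k8s.io/api/core/v1 alias imported by this file), a minimal spec that imports
+// one external tag and resolves short image references locally could look like:
+//
+//	spec := ImageStreamSpec{
+//		LookupPolicy: ImageLookupPolicy{Local: true},
+//		Tags: []TagReference{{
+//			Name:         "latest",
+//			From:         &corev1.ObjectReference{Kind: "DockerImage", Name: "quay.io/example/app:latest"},
+//			ImportPolicy: TagImportPolicy{Scheduled: true},
+//		}},
+//	}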
+type ImageStreamSpec struct { + // lookupPolicy controls how other resources reference images within this namespace. + LookupPolicy ImageLookupPolicy `json:"lookupPolicy,omitempty" protobuf:"bytes,3,opt,name=lookupPolicy"` + // dockerImageRepository is optional, if specified this stream is backed by a container repository on this server + // Deprecated: This field is deprecated as of v3.7 and will be removed in a future release. + // Specify the source for the tags to be imported in each tag via the spec.tags.from reference instead. + DockerImageRepository string `json:"dockerImageRepository,omitempty" protobuf:"bytes,1,opt,name=dockerImageRepository"` + // tags map arbitrary string values to specific image locators + // +patchMergeKey=name + // +patchStrategy=merge + Tags []TagReference `json:"tags,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=tags"` +} + +// ImageLookupPolicy describes how an image stream can be used to override the image references +// used by pods, builds, and other resources in a namespace. +type ImageLookupPolicy struct { + // local will change the docker short image references (like "mysql" or + // "php:latest") on objects in this namespace to the image ID whenever they match + // this image stream, instead of reaching out to a remote registry. The name will + // be fully qualified to an image ID if found. The tag's referencePolicy is taken + // into account on the replaced value. Only works within the current namespace. + Local bool `json:"local" protobuf:"varint,3,opt,name=local"` +} + +// TagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track. +type TagReference struct { + // Name of the tag + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Optional; if specified, annotations that are applied to images retrieved via ImageStreamTags. + // +optional + Annotations map[string]string `json:"annotations" protobuf:"bytes,2,rep,name=annotations"` + // Optional; if specified, a reference to another image that this tag should point to. Valid values + // are ImageStreamTag, ImageStreamImage, and DockerImage. ImageStreamTag references + // can only reference a tag within this same ImageStream. + From *corev1.ObjectReference `json:"from,omitempty" protobuf:"bytes,3,opt,name=from"` + // Reference states if the tag will be imported. Default value is false, which means the tag will + // be imported. + Reference bool `json:"reference,omitempty" protobuf:"varint,4,opt,name=reference"` + // Generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference + // is changed the generation is set to match the current stream generation (which is incremented every + // time spec is changed). Other processes in the system like the image importer observe that the + // generation of spec tag is newer than the generation recorded in the status and use that as a trigger + // to import the newest remote tag. To trigger a new import, clients may set this value to zero which + // will reset the generation to the latest stream generation. Legacy clients will send this value as + // nil which will be merged with the current tag generation. + // +optional + Generation *int64 `json:"generation" protobuf:"varint,5,opt,name=generation"` + // ImportPolicy is information that controls how images may be imported by the server. 
+ ImportPolicy TagImportPolicy `json:"importPolicy,omitempty" protobuf:"bytes,6,opt,name=importPolicy"` + // ReferencePolicy defines how other components should consume the image. + ReferencePolicy TagReferencePolicy `json:"referencePolicy,omitempty" protobuf:"bytes,7,opt,name=referencePolicy"` +} + +// TagImportPolicy controls how images related to this tag will be imported. +type TagImportPolicy struct { + // Insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import. + Insecure bool `json:"insecure,omitempty" protobuf:"varint,1,opt,name=insecure"` + // Scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported + Scheduled bool `json:"scheduled,omitempty" protobuf:"varint,2,opt,name=scheduled"` + // ImportMode describes how to import an image manifest. + ImportMode ImportModeType `json:"importMode,omitempty" protobuf:"bytes,3,opt,name=importMode,casttype=ImportModeType"` +} + +// ImportModeType describes how to import an image manifest. +type ImportModeType string + +const ( + // ImportModeLegacy indicates that the legacy behaviour should be used. + // For manifest lists, the legacy behaviour will discard the manifest list and import a single + // sub-manifest. In this case, the platform is chosen in the following order of priority: + // 1. tag annotations; 2. control plane arch/os; 3. linux/amd64; 4. the first manifest in the list. + // This mode is the default. + ImportModeLegacy ImportModeType = "Legacy" + // ImportModePreserveOriginal indicates that the original manifest will be preserved. + // For manifest lists, the manifest list and all its sub-manifests will be imported. + ImportModePreserveOriginal ImportModeType = "PreserveOriginal" +) + +// TagReferencePolicyType describes how pull-specs for images in an image stream tag are generated when +// image change triggers are fired. +type TagReferencePolicyType string + +const ( + // SourceTagReferencePolicy indicates the image's original location should be used when the image stream tag + // is resolved into other resources (builds and deployment configurations). + SourceTagReferencePolicy TagReferencePolicyType = "Source" + // LocalTagReferencePolicy indicates the image should prefer to pull via the local integrated registry, + // falling back to the remote location if the integrated registry has not been configured. The reference will + // use the internal DNS name or registry service IP. + LocalTagReferencePolicy TagReferencePolicyType = "Local" +) + +// TagReferencePolicy describes how pull-specs for images in this image stream tag are generated when +// image change triggers in deployment configs or builds are resolved. This allows the image stream +// author to control how images are accessed. +type TagReferencePolicy struct { + // Type determines how the image pull spec should be transformed when the image stream tag is used in + // deployment config triggers or new builds. The default value is `Source`, indicating the original + // location of the image should be used (if imported). The user may also specify `Local`, indicating + // that the pull spec should point to the integrated container image registry and leverage the registry's + // ability to proxy the pull to an upstream registry. `Local` allows the credentials used to pull this + // image to be managed from the image stream's namespace, so others on the platform can access a remote + // image but have no access to the remote secret. 
It also allows the image layers to be mirrored into
+	// the local registry, from which the images can still be pulled even if the upstream registry is unavailable.
+	Type TagReferencePolicyType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=TagReferencePolicyType"`
+}
+
+// ImageStreamStatus contains information about the state of this image stream.
+type ImageStreamStatus struct {
+	// DockerImageRepository represents the effective location this stream may be accessed at.
+	// May be empty until the server determines where the repository is located
+	DockerImageRepository string `json:"dockerImageRepository" protobuf:"bytes,1,opt,name=dockerImageRepository"`
+	// PublicDockerImageRepository represents the public location from where the image can
+	// be pulled outside the cluster. This field may be empty if the administrator
+	// has not exposed the integrated registry externally.
+	PublicDockerImageRepository string `json:"publicDockerImageRepository,omitempty" protobuf:"bytes,3,opt,name=publicDockerImageRepository"`
+	// Tags are a historical record of images associated with each tag. The first entry in the
+	// TagEvent array is the currently tagged image.
+	// +patchMergeKey=tag
+	// +patchStrategy=merge
+	Tags []NamedTagEventList `json:"tags,omitempty" patchStrategy:"merge" patchMergeKey:"tag" protobuf:"bytes,2,rep,name=tags"`
+}
+
+// NamedTagEventList relates a tag to its image history.
+type NamedTagEventList struct {
+	// Tag is the tag for which the history is recorded
+	Tag string `json:"tag" protobuf:"bytes,1,opt,name=tag"`
+	// Items is the historical record of images associated with this tag.
+	Items []TagEvent `json:"items" protobuf:"bytes,2,rep,name=items"`
+	// Conditions is an array of conditions that apply to the tag event list.
+	Conditions []TagEventCondition `json:"conditions,omitempty" protobuf:"bytes,3,rep,name=conditions"`
+}
+
+// TagEvent is used by ImageStreamStatus to keep a historical record of images associated with a tag.
+type TagEvent struct {
+	// Created holds the time the TagEvent was created
+	Created metav1.Time `json:"created" protobuf:"bytes,1,opt,name=created"`
+	// DockerImageReference is the string that can be used to pull this image
+	DockerImageReference string `json:"dockerImageReference" protobuf:"bytes,2,opt,name=dockerImageReference"`
+	// Image is the image
+	Image string `json:"image" protobuf:"bytes,3,opt,name=image"`
+	// Generation is the spec tag generation that resulted in this tag being updated
+	Generation int64 `json:"generation" protobuf:"varint,4,opt,name=generation"`
+}
+
+type TagEventConditionType string
+
+// These are valid conditions of TagEvents.
+const (
+	// ImportSuccess with status False means the import of the specific tag failed
+	ImportSuccess TagEventConditionType = "ImportSuccess"
+)
+
+// TagEventCondition contains condition information for a tag event.
+type TagEventCondition struct {
+	// Type of tag event condition, currently only ImportSuccess
+	Type TagEventConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=TagEventConditionType"`
+	// Status of the condition, one of True, False, Unknown.
+	Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+	// LastTransitionTime is the time the condition transitioned from one status to another.
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+	// Reason is a brief machine readable explanation for the condition's last transition.
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// Message is a human readable description of the details about last transition, complementing reason.
+	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+	// Generation is the spec tag generation that this status corresponds to
+	Generation int64 `json:"generation" protobuf:"varint,6,opt,name=generation"`
+}
+
+// +genclient
+// +genclient:skipVerbs=get,list,create,update,patch,delete,deleteCollection,watch
+// +genclient:method=Create,verb=create,result=k8s.io/apimachinery/pkg/apis/meta/v1.Status
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageStreamMapping represents a mapping from a single image stream tag to a container
+// image as well as the reference to the container image stream the image came from. This
+// resource is used by privileged integrators to create an image resource and to associate
+// it with an image stream in the status tags field. Creating an ImageStreamMapping will
+// allow any user who can view the image stream to tag or pull that image, so only create
+// mappings where the user has proven they have access to the image contents directly.
+// The only operation supported for this resource is create, and the metadata name and
+// namespace should be set to the image stream containing the tag that should be updated.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageStreamMapping struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Image is a container image.
+	Image Image `json:"image" protobuf:"bytes,2,opt,name=image"`
+	// Tag is the string by which this image can be located within the stream.
+	Tag string `json:"tag" protobuf:"bytes,3,opt,name=tag"`
+}
+
+// +genclient
+// +genclient:onlyVerbs=get,list,create,update,delete
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageStreamTag represents an Image that is retrieved by tag name from an ImageStream.
+// Use this resource to interact with the tags and images in an image stream by tag, or
+// to see the image details for a particular tag. The image associated with this resource
+// is the most recently successfully tagged, imported, or pushed image (as described in the
+// image stream status.tags.items list for this tag). If an import is in progress or has
+// failed, the previous image will be shown. Deleting an image stream tag clears both the
+// status and spec fields of an image stream. If no image can be retrieved for a given tag,
+// a not found error will be returned.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageStreamTag struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // tag is the spec tag associated with this image stream tag, and it may be null + // if only pushes have occurred to this image stream. + Tag *TagReference `json:"tag" protobuf:"bytes,2,opt,name=tag"` + + // generation is the current generation of the tagged image - if tag is provided + // and this value is not equal to the tag generation, a user has requested an + // import that has not completed, or conditions will be filled out indicating any + // error. + Generation int64 `json:"generation" protobuf:"varint,3,opt,name=generation"` + + // lookupPolicy indicates whether this tag will handle image references in this + // namespace. + LookupPolicy ImageLookupPolicy `json:"lookupPolicy" protobuf:"varint,6,opt,name=lookupPolicy"` + + // conditions is an array of conditions that apply to the image stream tag. + Conditions []TagEventCondition `json:"conditions,omitempty" protobuf:"bytes,4,rep,name=conditions"` + + // image associated with the ImageStream and tag. + Image Image `json:"image" protobuf:"bytes,5,opt,name=image"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageStreamTagList is a list of ImageStreamTag objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImageStreamTagList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of image stream tags + Items []ImageStreamTag `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:onlyVerbs=get,list,create,update,delete +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageTag represents a single tag within an image stream and includes the spec, +// the status history, and the currently referenced image (if any) of the provided +// tag. This type replaces the ImageStreamTag by providing a full view of the tag. +// ImageTags are returned for every spec or status tag present on the image stream. +// If no tag exists in either form a not found error will be returned by the API. +// A create operation will succeed if no spec tag has already been defined and the +// spec field is set. Delete will remove both spec and status elements from the +// image stream. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImageTag struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec is the spec tag associated with this image stream tag, and it may be null + // if only pushes have occurred to this image stream. 
+	Spec *TagReference `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+	// status is the status tag details associated with this image stream tag, and it
+	// may be null if no push or import has been performed.
+	Status *NamedTagEventList `json:"status" protobuf:"bytes,3,opt,name=status"`
+	// image is the details of the most recent image stream status tag, and it may be
+	// null if import has not completed or an administrator has deleted the image
+	// object. To verify this is the most recent image, you must verify the generation
+	// of the most recent status.items entry matches the spec tag (if a spec tag is
+	// set). This field will not be set when listing image tags.
+	Image *Image `json:"image" protobuf:"bytes,4,opt,name=image"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageTagList is a list of ImageTag objects. When listing image tags, the image
+// field is not populated. Tags are returned in alphabetical order by image stream
+// and then tag.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageTagList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of image stream tags
+	Items []ImageTag `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// +genclient
+// +genclient:onlyVerbs=get
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ImageStreamImage represents an Image that is retrieved by image name from an ImageStream.
+// User interfaces and regular users can use this resource to access the metadata details of
+// a tagged image in the image stream history for viewing, since Image resources are not
+// directly accessible to end users. A not found error will be returned if no such image is
+// referenced by a tag within the ImageStream. Images are created when spec tags are set on
+// an image stream that represent an image in an external registry, when pushing to the
+// integrated registry, or when tagging an existing image from one image stream to another.
+// The name of an image stream image is in the form "<name>@<id>", where the digest is
+// the content addressable identifier for the image (sha256:xxxxx...). You can use
+// ImageStreamImages as the from.kind of an image stream spec tag to reference an image
+// exactly. The only operation supported on the imagestreamimage endpoint is retrieving
+// the image.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ImageStreamImage struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Image associated with the ImageStream and image name.
+	Image Image `json:"image" protobuf:"bytes,2,opt,name=image"`
+}
+
+// DockerImageReference points to a container image.
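+//
+// As an editor's sketch, the fields decompose a pull spec such as
+// "registry.example.com/demo/app:v1" (a placeholder reference) into:
+//
+//	ref := DockerImageReference{
+//		Registry:  "registry.example.com",
+//		Namespace: "demo",
+//		Name:      "app",
+//		Tag:       "v1",
+//	}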
+type DockerImageReference struct { + // Registry is the registry that contains the container image + Registry string `protobuf:"bytes,1,opt,name=registry"` + // Namespace is the namespace that contains the container image + Namespace string `protobuf:"bytes,2,opt,name=namespace"` + // Name is the name of the container image + Name string `protobuf:"bytes,3,opt,name=name"` + // Tag is which tag of the container image is being referenced + Tag string `protobuf:"bytes,4,opt,name=tag"` + // ID is the identifier for the container image + ID string `protobuf:"bytes,5,opt,name=iD"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageStreamLayers describes information about the layers referenced by images in this +// image stream. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImageStreamLayers struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // blobs is a map of blob name to metadata about the blob. + Blobs map[string]ImageLayerData `json:"blobs" protobuf:"bytes,2,rep,name=blobs"` + // images is a map between an image name and the names of the blobs and config that + // comprise the image. + Images map[string]ImageBlobReferences `json:"images" protobuf:"bytes,3,rep,name=images"` +} + +// ImageBlobReferences describes the blob references within an image. +type ImageBlobReferences struct { + // imageMissing is true if the image is referenced by the image stream but the image + // object has been deleted from the API by an administrator. When this field is set, + // layers and config fields may be empty and callers that depend on the image metadata + // should consider the image to be unavailable for download or viewing. + // +optional + ImageMissing bool `json:"imageMissing" protobuf:"varint,3,opt,name=imageMissing"` + // layers is the list of blobs that compose this image, from base layer to top layer. + // All layers referenced by this array will be defined in the blobs map. Some images + // may have zero layers. + // +optional + Layers []string `json:"layers" protobuf:"bytes,1,rep,name=layers"` + // config, if set, is the blob that contains the image config. Some images do + // not have separate config blobs and this field will be set to nil if so. + // +optional + Config *string `json:"config" protobuf:"bytes,2,opt,name=config"` + // manifests is the list of other image names that this image points + // to. For a single architecture image, it is empty. For a multi-arch + // image, it consists of the digests of single architecture images, + // such images shouldn't have layers nor config. + // +optional + Manifests []string `json:"manifests,omitempty" protobuf:"bytes,4,rep,name=manifests"` +} + +// ImageLayerData contains metadata about an image layer. +type ImageLayerData struct { + // Size of the layer in bytes as defined by the underlying store. This field is + // optional if the necessary information about size is not available. + LayerSize *int64 `json:"size" protobuf:"varint,1,opt,name=size"` + // MediaType of the referenced object. 
+ MediaType string `json:"mediaType" protobuf:"bytes,2,opt,name=mediaType"` +} + +// +genclient +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// The image stream import resource provides an easy way for a user to find and import container images +// from other container image registries into the server. Individual images or an entire image repository may +// be imported, and users may choose to see the results of the import prior to tagging the resulting +// images into the specified image stream. +// +// This API is intended for end-user tools that need to see the metadata of the image prior to import +// (for instance, to generate an application from it). Clients that know the desired image can continue +// to create spec.tags directly into their image streams. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImageStreamImport struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec is a description of the images that the user wishes to import + Spec ImageStreamImportSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + // Status is the result of importing the image + Status ImageStreamImportStatus `json:"status" protobuf:"bytes,3,opt,name=status"` +} + +// ImageStreamImportSpec defines what images should be imported. +type ImageStreamImportSpec struct { + // Import indicates whether to perform an import - if so, the specified tags are set on the spec + // and status of the image stream defined by the type meta. + Import bool `json:"import" protobuf:"varint,1,opt,name=import"` + // Repository is an optional import of an entire container image repository. A maximum limit on the + // number of tags imported this way is imposed by the server. + Repository *RepositoryImportSpec `json:"repository,omitempty" protobuf:"bytes,2,opt,name=repository"` + // Images are a list of individual images to import. + Images []ImageImportSpec `json:"images,omitempty" protobuf:"bytes,3,rep,name=images"` +} + +// ImageStreamImportStatus contains information about the status of an image stream import. +type ImageStreamImportStatus struct { + // Import is the image stream that was successfully updated or created when 'to' was set. + Import *ImageStream `json:"import,omitempty" protobuf:"bytes,1,opt,name=import"` + // Repository is set if spec.repository was set to the outcome of the import + Repository *RepositoryImportStatus `json:"repository,omitempty" protobuf:"bytes,2,opt,name=repository"` + // Images is set with the result of importing spec.images + Images []ImageImportStatus `json:"images,omitempty" protobuf:"bytes,3,rep,name=images"` +} + +// RepositoryImportSpec describes a request to import images from a container image repository. 
+type RepositoryImportSpec struct { + // From is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed + From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"` + + // ImportPolicy is the policy controlling how the image is imported + ImportPolicy TagImportPolicy `json:"importPolicy,omitempty" protobuf:"bytes,2,opt,name=importPolicy"` + // ReferencePolicy defines how other components should consume the image + ReferencePolicy TagReferencePolicy `json:"referencePolicy,omitempty" protobuf:"bytes,4,opt,name=referencePolicy"` + // IncludeManifest determines if the manifest for each image is returned in the response + IncludeManifest bool `json:"includeManifest,omitempty" protobuf:"varint,3,opt,name=includeManifest"` +} + +// RepositoryImportStatus describes the result of an image repository import +type RepositoryImportStatus struct { + // Status reflects whether any failure occurred during import + Status metav1.Status `json:"status,omitempty" protobuf:"bytes,1,opt,name=status"` + // Images is a list of images successfully retrieved by the import of the repository. + Images []ImageImportStatus `json:"images,omitempty" protobuf:"bytes,2,rep,name=images"` + // AdditionalTags are tags that exist in the repository but were not imported because + // a maximum limit of automatic imports was applied. + AdditionalTags []string `json:"additionalTags,omitempty" protobuf:"bytes,3,rep,name=additionalTags"` +} + +// ImageImportSpec describes a request to import a specific image. +type ImageImportSpec struct { + // From is the source of an image to import; only kind DockerImage is allowed + From corev1.ObjectReference `json:"from" protobuf:"bytes,1,opt,name=from"` + // To is a tag in the current image stream to assign the imported image to, if name is not specified the default tag from from.name will be used + To *corev1.LocalObjectReference `json:"to,omitempty" protobuf:"bytes,2,opt,name=to"` + + // ImportPolicy is the policy controlling how the image is imported + ImportPolicy TagImportPolicy `json:"importPolicy,omitempty" protobuf:"bytes,3,opt,name=importPolicy"` + // ReferencePolicy defines how other components should consume the image + ReferencePolicy TagReferencePolicy `json:"referencePolicy,omitempty" protobuf:"bytes,5,opt,name=referencePolicy"` + // IncludeManifest determines if the manifest for each image is returned in the response + IncludeManifest bool `json:"includeManifest,omitempty" protobuf:"varint,4,opt,name=includeManifest"` +} + +// ImageImportStatus describes the result of an image import. +type ImageImportStatus struct { + // Status is the status of the image import, including errors encountered while retrieving the image + Status metav1.Status `json:"status" protobuf:"bytes,1,opt,name=status"` + // Image is the metadata of that image, if the image was located + Image *Image `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` + // Tag is the tag this image was located under, if any + Tag string `json:"tag,omitempty" protobuf:"bytes,3,opt,name=tag"` + // Manifests holds sub-manifests metadata when importing a manifest list + Manifests []Image `json:"manifests,omitempty" protobuf:"bytes,4,rep,name=manifests"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SecretList is a list of Secret. 
+// +openshift:compatibility-gen:level=1 +type SecretList corev1.SecretList diff --git a/vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..953f70263 --- /dev/null +++ b/vendor/github.com/openshift/api/image/v1/zz_generated.deepcopy.go @@ -0,0 +1,1045 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerImageReference) DeepCopyInto(out *DockerImageReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerImageReference. +func (in *DockerImageReference) DeepCopy() *DockerImageReference { + if in == nil { + return nil + } + out := new(DockerImageReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Image) DeepCopyInto(out *Image) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.DockerImageMetadata.DeepCopyInto(&out.DockerImageMetadata) + if in.DockerImageLayers != nil { + in, out := &in.DockerImageLayers, &out.DockerImageLayers + *out = make([]ImageLayer, len(*in)) + copy(*out, *in) + } + if in.Signatures != nil { + in, out := &in.Signatures, &out.Signatures + *out = make([]ImageSignature, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DockerImageSignatures != nil { + in, out := &in.DockerImageSignatures, &out.DockerImageSignatures + *out = make([][]byte, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = make([]byte, len(*in)) + copy(*out, *in) + } + } + } + if in.DockerImageManifests != nil { + in, out := &in.DockerImageManifests, &out.DockerImageManifests + *out = make([]ImageManifest, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. +func (in *Image) DeepCopy() *Image { + if in == nil { + return nil + } + out := new(Image) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Image) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageBlobReferences) DeepCopyInto(out *ImageBlobReferences) { + *out = *in + if in.Layers != nil { + in, out := &in.Layers, &out.Layers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(string) + **out = **in + } + if in.Manifests != nil { + in, out := &in.Manifests, &out.Manifests + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageBlobReferences. 
+func (in *ImageBlobReferences) DeepCopy() *ImageBlobReferences { + if in == nil { + return nil + } + out := new(ImageBlobReferences) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageImportSpec) DeepCopyInto(out *ImageImportSpec) { + *out = *in + out.From = in.From + if in.To != nil { + in, out := &in.To, &out.To + *out = new(corev1.LocalObjectReference) + **out = **in + } + out.ImportPolicy = in.ImportPolicy + out.ReferencePolicy = in.ReferencePolicy + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageImportSpec. +func (in *ImageImportSpec) DeepCopy() *ImageImportSpec { + if in == nil { + return nil + } + out := new(ImageImportSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageImportStatus) DeepCopyInto(out *ImageImportStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(Image) + (*in).DeepCopyInto(*out) + } + if in.Manifests != nil { + in, out := &in.Manifests, &out.Manifests + *out = make([]Image, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageImportStatus. +func (in *ImageImportStatus) DeepCopy() *ImageImportStatus { + if in == nil { + return nil + } + out := new(ImageImportStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageLayer) DeepCopyInto(out *ImageLayer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLayer. +func (in *ImageLayer) DeepCopy() *ImageLayer { + if in == nil { + return nil + } + out := new(ImageLayer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageLayerData) DeepCopyInto(out *ImageLayerData) { + *out = *in + if in.LayerSize != nil { + in, out := &in.LayerSize, &out.LayerSize + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLayerData. +func (in *ImageLayerData) DeepCopy() *ImageLayerData { + if in == nil { + return nil + } + out := new(ImageLayerData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageList) DeepCopyInto(out *ImageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Image, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageList. +func (in *ImageList) DeepCopy() *ImageList { + if in == nil { + return nil + } + out := new(ImageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ImageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageLookupPolicy) DeepCopyInto(out *ImageLookupPolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageLookupPolicy. +func (in *ImageLookupPolicy) DeepCopy() *ImageLookupPolicy { + if in == nil { + return nil + } + out := new(ImageLookupPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageManifest) DeepCopyInto(out *ImageManifest) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageManifest. +func (in *ImageManifest) DeepCopy() *ImageManifest { + if in == nil { + return nil + } + out := new(ImageManifest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageSignature) DeepCopyInto(out *ImageSignature) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Content != nil { + in, out := &in.Content, &out.Content + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]SignatureCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SignedClaims != nil { + in, out := &in.SignedClaims, &out.SignedClaims + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Created != nil { + in, out := &in.Created, &out.Created + *out = (*in).DeepCopy() + } + if in.IssuedBy != nil { + in, out := &in.IssuedBy, &out.IssuedBy + *out = new(SignatureIssuer) + **out = **in + } + if in.IssuedTo != nil { + in, out := &in.IssuedTo, &out.IssuedTo + *out = new(SignatureSubject) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageSignature. +func (in *ImageSignature) DeepCopy() *ImageSignature { + if in == nil { + return nil + } + out := new(ImageSignature) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageSignature) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStream) DeepCopyInto(out *ImageStream) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStream. +func (in *ImageStream) DeepCopy() *ImageStream { + if in == nil { + return nil + } + out := new(ImageStream) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ImageStream) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamImage) DeepCopyInto(out *ImageStreamImage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Image.DeepCopyInto(&out.Image) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamImage. +func (in *ImageStreamImage) DeepCopy() *ImageStreamImage { + if in == nil { + return nil + } + out := new(ImageStreamImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamImage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamImport) DeepCopyInto(out *ImageStreamImport) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamImport. +func (in *ImageStreamImport) DeepCopy() *ImageStreamImport { + if in == nil { + return nil + } + out := new(ImageStreamImport) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamImport) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamImportSpec) DeepCopyInto(out *ImageStreamImportSpec) { + *out = *in + if in.Repository != nil { + in, out := &in.Repository, &out.Repository + *out = new(RepositoryImportSpec) + **out = **in + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ImageImportSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamImportSpec. +func (in *ImageStreamImportSpec) DeepCopy() *ImageStreamImportSpec { + if in == nil { + return nil + } + out := new(ImageStreamImportSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamImportStatus) DeepCopyInto(out *ImageStreamImportStatus) { + *out = *in + if in.Import != nil { + in, out := &in.Import, &out.Import + *out = new(ImageStream) + (*in).DeepCopyInto(*out) + } + if in.Repository != nil { + in, out := &in.Repository, &out.Repository + *out = new(RepositoryImportStatus) + (*in).DeepCopyInto(*out) + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ImageImportStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamImportStatus. 
+func (in *ImageStreamImportStatus) DeepCopy() *ImageStreamImportStatus { + if in == nil { + return nil + } + out := new(ImageStreamImportStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamLayers) DeepCopyInto(out *ImageStreamLayers) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Blobs != nil { + in, out := &in.Blobs, &out.Blobs + *out = make(map[string]ImageLayerData, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make(map[string]ImageBlobReferences, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamLayers. +func (in *ImageStreamLayers) DeepCopy() *ImageStreamLayers { + if in == nil { + return nil + } + out := new(ImageStreamLayers) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamLayers) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamList) DeepCopyInto(out *ImageStreamList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImageStream, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamList. +func (in *ImageStreamList) DeepCopy() *ImageStreamList { + if in == nil { + return nil + } + out := new(ImageStreamList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamMapping) DeepCopyInto(out *ImageStreamMapping) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Image.DeepCopyInto(&out.Image) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamMapping. +func (in *ImageStreamMapping) DeepCopy() *ImageStreamMapping { + if in == nil { + return nil + } + out := new(ImageStreamMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamMapping) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageStreamSpec) DeepCopyInto(out *ImageStreamSpec) { + *out = *in + out.LookupPolicy = in.LookupPolicy + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]TagReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamSpec. +func (in *ImageStreamSpec) DeepCopy() *ImageStreamSpec { + if in == nil { + return nil + } + out := new(ImageStreamSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamStatus) DeepCopyInto(out *ImageStreamStatus) { + *out = *in + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]NamedTagEventList, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamStatus. +func (in *ImageStreamStatus) DeepCopy() *ImageStreamStatus { + if in == nil { + return nil + } + out := new(ImageStreamStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamTag) DeepCopyInto(out *ImageStreamTag) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Tag != nil { + in, out := &in.Tag, &out.Tag + *out = new(TagReference) + (*in).DeepCopyInto(*out) + } + out.LookupPolicy = in.LookupPolicy + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]TagEventCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Image.DeepCopyInto(&out.Image) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamTag. +func (in *ImageStreamTag) DeepCopy() *ImageStreamTag { + if in == nil { + return nil + } + out := new(ImageStreamTag) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamTag) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageStreamTagList) DeepCopyInto(out *ImageStreamTagList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImageStreamTag, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageStreamTagList. +func (in *ImageStreamTagList) DeepCopy() *ImageStreamTagList { + if in == nil { + return nil + } + out := new(ImageStreamTagList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageStreamTagList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageTag) DeepCopyInto(out *ImageTag) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + *out = new(TagReference) + (*in).DeepCopyInto(*out) + } + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(NamedTagEventList) + (*in).DeepCopyInto(*out) + } + if in.Image != nil { + in, out := &in.Image, &out.Image + *out = new(Image) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTag. +func (in *ImageTag) DeepCopy() *ImageTag { + if in == nil { + return nil + } + out := new(ImageTag) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageTag) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageTagList) DeepCopyInto(out *ImageTagList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImageTag, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageTagList. +func (in *ImageTagList) DeepCopy() *ImageTagList { + if in == nil { + return nil + } + out := new(ImageTagList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageTagList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedTagEventList) DeepCopyInto(out *NamedTagEventList) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TagEvent, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]TagEventCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedTagEventList. +func (in *NamedTagEventList) DeepCopy() *NamedTagEventList { + if in == nil { + return nil + } + out := new(NamedTagEventList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RepositoryImportSpec) DeepCopyInto(out *RepositoryImportSpec) { + *out = *in + out.From = in.From + out.ImportPolicy = in.ImportPolicy + out.ReferencePolicy = in.ReferencePolicy + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryImportSpec. +func (in *RepositoryImportSpec) DeepCopy() *RepositoryImportSpec { + if in == nil { + return nil + } + out := new(RepositoryImportSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RepositoryImportStatus) DeepCopyInto(out *RepositoryImportStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ImageImportStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AdditionalTags != nil { + in, out := &in.AdditionalTags, &out.AdditionalTags + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RepositoryImportStatus. +func (in *RepositoryImportStatus) DeepCopy() *RepositoryImportStatus { + if in == nil { + return nil + } + out := new(RepositoryImportStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretList) DeepCopyInto(out *SecretList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]corev1.Secret, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretList. +func (in *SecretList) DeepCopy() *SecretList { + if in == nil { + return nil + } + out := new(SecretList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecretList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignatureCondition) DeepCopyInto(out *SignatureCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureCondition. +func (in *SignatureCondition) DeepCopy() *SignatureCondition { + if in == nil { + return nil + } + out := new(SignatureCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignatureGenericEntity) DeepCopyInto(out *SignatureGenericEntity) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureGenericEntity. +func (in *SignatureGenericEntity) DeepCopy() *SignatureGenericEntity { + if in == nil { + return nil + } + out := new(SignatureGenericEntity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SignatureIssuer) DeepCopyInto(out *SignatureIssuer) { + *out = *in + out.SignatureGenericEntity = in.SignatureGenericEntity + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureIssuer. +func (in *SignatureIssuer) DeepCopy() *SignatureIssuer { + if in == nil { + return nil + } + out := new(SignatureIssuer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SignatureSubject) DeepCopyInto(out *SignatureSubject) { + *out = *in + out.SignatureGenericEntity = in.SignatureGenericEntity + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SignatureSubject. +func (in *SignatureSubject) DeepCopy() *SignatureSubject { + if in == nil { + return nil + } + out := new(SignatureSubject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagEvent) DeepCopyInto(out *TagEvent) { + *out = *in + in.Created.DeepCopyInto(&out.Created) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagEvent. +func (in *TagEvent) DeepCopy() *TagEvent { + if in == nil { + return nil + } + out := new(TagEvent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagEventCondition) DeepCopyInto(out *TagEventCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagEventCondition. +func (in *TagEventCondition) DeepCopy() *TagEventCondition { + if in == nil { + return nil + } + out := new(TagEventCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagImportPolicy) DeepCopyInto(out *TagImportPolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagImportPolicy. +func (in *TagImportPolicy) DeepCopy() *TagImportPolicy { + if in == nil { + return nil + } + out := new(TagImportPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagReference) DeepCopyInto(out *TagReference) { + *out = *in + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.From != nil { + in, out := &in.From, &out.From + *out = new(corev1.ObjectReference) + **out = **in + } + if in.Generation != nil { + in, out := &in.Generation, &out.Generation + *out = new(int64) + **out = **in + } + out.ImportPolicy = in.ImportPolicy + out.ReferencePolicy = in.ReferencePolicy + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagReference. +func (in *TagReference) DeepCopy() *TagReference { + if in == nil { + return nil + } + out := new(TagReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TagReferencePolicy) DeepCopyInto(out *TagReferencePolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagReferencePolicy. 
+func (in *TagReferencePolicy) DeepCopy() *TagReferencePolicy { + if in == nil { + return nil + } + out := new(TagReferencePolicy) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..ec7fc2b45 --- /dev/null +++ b/vendor/github.com/openshift/api/image/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,444 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_DockerImageReference = map[string]string{ + "": "DockerImageReference points to a container image.", + "Registry": "Registry is the registry that contains the container image", + "Namespace": "Namespace is the namespace that contains the container image", + "Name": "Name is the name of the container image", + "Tag": "Tag is which tag of the container image is being referenced", + "ID": "ID is the identifier for the container image", +} + +func (DockerImageReference) SwaggerDoc() map[string]string { + return map_DockerImageReference +} + +var map_Image = map[string]string{ + "": "Image is an immutable representation of a container image and metadata at a point in time. Images are named by taking a hash of their contents (metadata and content) and any change in format, content, or metadata results in a new name. The images resource is primarily for use by cluster administrators and integrations like the cluster image registry - end users instead access images via the imagestreamtags or imagestreamimages resources. While image metadata is stored in the API, any integration that implements the container image registry API must provide its own storage for the raw manifest data, image config, and layer contents.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "dockerImageReference": "DockerImageReference is the string that can be used to pull this image.", + "dockerImageMetadata": "DockerImageMetadata contains metadata about this image", + "dockerImageMetadataVersion": "DockerImageMetadataVersion conveys the version of the object, which if empty defaults to \"1.0\"", + "dockerImageManifest": "DockerImageManifest is the raw JSON of the manifest", + "dockerImageLayers": "DockerImageLayers represents the layers in the image. May not be set if the image does not define that data or if the image represents a manifest list.", + "signatures": "Signatures holds all signatures of the image.", + "dockerImageSignatures": "DockerImageSignatures provides the signatures as opaque blobs. This is a part of manifest schema v1.", + "dockerImageManifestMediaType": "DockerImageManifestMediaType specifies the mediaType of manifest. 
This is a part of manifest schema v2.", + "dockerImageConfig": "DockerImageConfig is a JSON blob that the runtime uses to set up the container. This is a part of manifest schema v2. Will not be set when the image represents a manifest list.", + "dockerImageManifests": "DockerImageManifests holds information about sub-manifests when the image represents a manifest list. When this field is present, no DockerImageLayers should be specified.", +} + +func (Image) SwaggerDoc() map[string]string { + return map_Image +} + +var map_ImageBlobReferences = map[string]string{ + "": "ImageBlobReferences describes the blob references within an image.", + "imageMissing": "imageMissing is true if the image is referenced by the image stream but the image object has been deleted from the API by an administrator. When this field is set, layers and config fields may be empty and callers that depend on the image metadata should consider the image to be unavailable for download or viewing.", + "layers": "layers is the list of blobs that compose this image, from base layer to top layer. All layers referenced by this array will be defined in the blobs map. Some images may have zero layers.", + "config": "config, if set, is the blob that contains the image config. Some images do not have separate config blobs and this field will be set to nil if so.", + "manifests": "manifests is the list of other image names that this image points to. For a single architecture image, it is empty. For a multi-arch image, it consists of the digests of single architecture images, such images shouldn't have layers nor config.", +} + +func (ImageBlobReferences) SwaggerDoc() map[string]string { + return map_ImageBlobReferences +} + +var map_ImageImportSpec = map[string]string{ + "": "ImageImportSpec describes a request to import a specific image.", + "from": "From is the source of an image to import; only kind DockerImage is allowed", + "to": "To is a tag in the current image stream to assign the imported image to, if name is not specified the default tag from from.name will be used", + "importPolicy": "ImportPolicy is the policy controlling how the image is imported", + "referencePolicy": "ReferencePolicy defines how other components should consume the image", + "includeManifest": "IncludeManifest determines if the manifest for each image is returned in the response", +} + +func (ImageImportSpec) SwaggerDoc() map[string]string { + return map_ImageImportSpec +} + +var map_ImageImportStatus = map[string]string{ + "": "ImageImportStatus describes the result of an image import.", + "status": "Status is the status of the image import, including errors encountered while retrieving the image", + "image": "Image is the metadata of that image, if the image was located", + "tag": "Tag is the tag this image was located under, if any", + "manifests": "Manifests holds sub-manifests metadata when importing a manifest list", +} + +func (ImageImportStatus) SwaggerDoc() map[string]string { + return map_ImageImportStatus +} + +var map_ImageLayer = map[string]string{ + "": "ImageLayer represents a single layer of the image. Some images may have multiple layers. 
Some may have none.",
+	"name": "Name of the layer as defined by the underlying store.",
+	"size": "Size of the layer in bytes as defined by the underlying store.",
+	"mediaType": "MediaType of the referenced object.",
+}
+
+func (ImageLayer) SwaggerDoc() map[string]string {
+	return map_ImageLayer
+}
+
+var map_ImageLayerData = map[string]string{
+	"": "ImageLayerData contains metadata about an image layer.",
+	"size": "Size of the layer in bytes as defined by the underlying store. This field is optional if the necessary information about size is not available.",
+	"mediaType": "MediaType of the referenced object.",
+}
+
+func (ImageLayerData) SwaggerDoc() map[string]string {
+	return map_ImageLayerData
+}
+
+var map_ImageList = map[string]string{
+	"": "ImageList is a list of Image objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"items": "Items is a list of images",
+}
+
+func (ImageList) SwaggerDoc() map[string]string {
+	return map_ImageList
+}
+
+var map_ImageLookupPolicy = map[string]string{
+	"": "ImageLookupPolicy describes how an image stream can be used to override the image references used by pods, builds, and other resources in a namespace.",
+	"local": "local will change the docker short image references (like \"mysql\" or \"php:latest\") on objects in this namespace to the image ID whenever they match this image stream, instead of reaching out to a remote registry. The name will be fully qualified to an image ID if found. The tag's referencePolicy is taken into account on the replaced value. Only works within the current namespace.",
+}
+
+func (ImageLookupPolicy) SwaggerDoc() map[string]string {
+	return map_ImageLookupPolicy
+}
+
+var map_ImageManifest = map[string]string{
+	"": "ImageManifest represents sub-manifests of a manifest list. The Digest field points to a regular Image object.",
+	"digest": "Digest is the unique identifier for the manifest. It refers to an Image object.",
+	"mediaType": "MediaType defines the type of the manifest, possible values are application/vnd.oci.image.manifest.v1+json, application/vnd.docker.distribution.manifest.v2+json or application/vnd.docker.distribution.manifest.v1+json.",
+	"manifestSize": "ManifestSize represents the size of the raw object contents, in bytes.",
+	"architecture": "Architecture specifies the supported CPU architecture, for example `amd64` or `ppc64le`.",
+	"os": "OS specifies the operating system, for example `linux`.",
+	"variant": "Variant is an optional field representing a variant of the CPU, for example v6 to specify a particular CPU variant of the ARM CPU.",
+}
+
+func (ImageManifest) SwaggerDoc() map[string]string {
+	return map_ImageManifest
+}
+
+var map_ImageSignature = map[string]string{
+	"": "ImageSignature holds a signature of an image. It allows to verify image identity and possibly other claims as long as the signature is trusted. Based on this information it is possible to restrict runnable images to those matching cluster-wide policy. Mandatory fields should be parsed by clients doing image verification. The others are parsed from signature's content by the server. 
They serve just an informative purpose.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "type": "Required: Describes a type of stored blob.", + "content": "Required: An opaque binary string which is an image's signature.", + "conditions": "Conditions represent the latest available observations of a signature's current state.", + "imageIdentity": "A human readable string representing image's identity. It could be a product name and version, or an image pull spec (e.g. \"registry.access.redhat.com/rhel7/rhel:7.2\").", + "signedClaims": "Contains claims from the signature.", + "created": "If specified, it is the time of signature's creation.", + "issuedBy": "If specified, it holds information about an issuer of signing certificate or key (a person or entity who signed the signing certificate or key).", + "issuedTo": "If specified, it holds information about a subject of signing certificate or key (a person or entity who signed the image).", +} + +func (ImageSignature) SwaggerDoc() map[string]string { + return map_ImageSignature +} + +var map_ImageStream = map[string]string{ + "": "An ImageStream stores a mapping of tags to images, metadata overrides that are applied when images are tagged in a stream, and an optional reference to a container image repository on a registry. Users typically update the spec.tags field to point to external images which are imported from container registries using credentials in your namespace with the pull secret type, or to existing image stream tags and images which are immediately accessible for tagging or pulling. The history of images applied to a tag is visible in the status.tags field and any user who can view an image stream is allowed to tag that image into their own image streams. Access to pull images from the integrated registry is granted by having the \"get imagestreams/layers\" permission on a given image stream. Users may remove a tag by deleting the imagestreamtag resource, which causes both spec and status for that tag to be removed. Image stream history is retained until an administrator runs the prune operation, which removes references that are no longer in use. To preserve a historical image, ensure there is a tag in spec pointing to that image by its digest.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec describes the desired state of this stream", + "status": "Status describes the current state of this stream", +} + +func (ImageStream) SwaggerDoc() map[string]string { + return map_ImageStream +} + +var map_ImageStreamImage = map[string]string{ + "": "ImageStreamImage represents an Image that is retrieved by image name from an ImageStream. User interfaces and regular users can use this resource to access the metadata details of a tagged image in the image stream history for viewing, since Image resources are not directly accessible to end users. A not found error will be returned if no such image is referenced by a tag within the ImageStream. 
Images are created when spec tags are set on an image stream that represent an image in an external registry, when pushing to the integrated registry, or when tagging an existing image from one image stream to another. The name of an image stream image is in the form \"<STREAM>@<DIGEST>\", where the digest is the content addressable identifier for the image (sha256:xxxxx...). You can use ImageStreamImages as the from.kind of an image stream spec tag to reference an image exactly. The only operations supported on the imagestreamimage endpoint are retrieving the image.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"image": "Image associated with the ImageStream and image name.",
+}
+
+func (ImageStreamImage) SwaggerDoc() map[string]string {
+	return map_ImageStreamImage
+}
+
+var map_ImageStreamImport = map[string]string{
+	"": "The image stream import resource provides an easy way for a user to find and import container images from other container image registries into the server. Individual images or an entire image repository may be imported, and users may choose to see the results of the import prior to tagging the resulting images into the specified image stream.\n\nThis API is intended for end-user tools that need to see the metadata of the image prior to import (for instance, to generate an application from it). Clients that know the desired image can continue to create spec.tags directly into their image streams.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec": "Spec is a description of the images that the user wishes to import",
+	"status": "Status is the result of importing the image",
+}
+
+func (ImageStreamImport) SwaggerDoc() map[string]string {
+	return map_ImageStreamImport
+}
+
+var map_ImageStreamImportSpec = map[string]string{
+	"": "ImageStreamImportSpec defines what images should be imported.",
+	"import": "Import indicates whether to perform an import - if so, the specified tags are set on the spec and status of the image stream defined by the type meta.",
+	"repository": "Repository is an optional import of an entire container image repository. 
A maximum limit on the number of tags imported this way is imposed by the server.", + "images": "Images are a list of individual images to import.", +} + +func (ImageStreamImportSpec) SwaggerDoc() map[string]string { + return map_ImageStreamImportSpec +} + +var map_ImageStreamImportStatus = map[string]string{ + "": "ImageStreamImportStatus contains information about the status of an image stream import.", + "import": "Import is the image stream that was successfully updated or created when 'to' was set.", + "repository": "Repository is set if spec.repository was set to the outcome of the import", + "images": "Images is set with the result of importing spec.images", +} + +func (ImageStreamImportStatus) SwaggerDoc() map[string]string { + return map_ImageStreamImportStatus +} + +var map_ImageStreamLayers = map[string]string{ + "": "ImageStreamLayers describes information about the layers referenced by images in this image stream.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "blobs": "blobs is a map of blob name to metadata about the blob.", + "images": "images is a map between an image name and the names of the blobs and config that comprise the image.", +} + +func (ImageStreamLayers) SwaggerDoc() map[string]string { + return map_ImageStreamLayers +} + +var map_ImageStreamList = map[string]string{ + "": "ImageStreamList is a list of ImageStream objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is a list of imageStreams", +} + +func (ImageStreamList) SwaggerDoc() map[string]string { + return map_ImageStreamList +} + +var map_ImageStreamMapping = map[string]string{ + "": "ImageStreamMapping represents a mapping from a single image stream tag to a container image as well as the reference to the container image stream the image came from. This resource is used by privileged integrators to create an image resource and to associate it with an image stream in the status tags field. Creating an ImageStreamMapping will allow any user who can view the image stream to tag or pull that image, so only create mappings where the user has proven they have access to the image contents directly. The only operation supported for this resource is create and the metadata name and namespace should be set to the image stream containing the tag that should be updated.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "image": "Image is a container image.", + "tag": "Tag is a string value this image can be located with inside the stream.", +} + +func (ImageStreamMapping) SwaggerDoc() map[string]string { + return map_ImageStreamMapping +} + +var map_ImageStreamSpec = map[string]string{ + "": "ImageStreamSpec represents options for ImageStreams.", + "lookupPolicy": "lookupPolicy controls how other resources reference images within this namespace.", + "dockerImageRepository": "dockerImageRepository is optional, if specified this stream is backed by a container repository on this server Deprecated: This field is deprecated as of v3.7 and will be removed in a future release. Specify the source for the tags to be imported in each tag via the spec.tags.from reference instead.", + "tags": "tags map arbitrary string values to specific image locators", +} + +func (ImageStreamSpec) SwaggerDoc() map[string]string { + return map_ImageStreamSpec +} + +var map_ImageStreamStatus = map[string]string{ + "": "ImageStreamStatus contains information about the state of this image stream.", + "dockerImageRepository": "DockerImageRepository represents the effective location this stream may be accessed at. May be empty until the server determines where the repository is located", + "publicDockerImageRepository": "PublicDockerImageRepository represents the public location from where the image can be pulled outside the cluster. This field may be empty if the administrator has not exposed the integrated registry externally.", + "tags": "Tags are a historical record of images associated with each tag. The first entry in the TagEvent array is the currently tagged image.", +} + +func (ImageStreamStatus) SwaggerDoc() map[string]string { + return map_ImageStreamStatus +} + +var map_ImageStreamTag = map[string]string{ + "": "ImageStreamTag represents an Image that is retrieved by tag name from an ImageStream. Use this resource to interact with the tags and images in an image stream by tag, or to see the image details for a particular tag. The image associated with this resource is the most recently successfully tagged, imported, or pushed image (as described in the image stream status.tags.items list for this tag). If an import is in progress or has failed the previous image will be shown. Deleting an image stream tag clears both the status and spec fields of an image stream. If no image can be retrieved for a given tag, a not found error will be returned.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "tag": "tag is the spec tag associated with this image stream tag, and it may be null if only pushes have occurred to this image stream.", + "generation": "generation is the current generation of the tagged image - if tag is provided and this value is not equal to the tag generation, a user has requested an import that has not completed, or conditions will be filled out indicating any error.", + "lookupPolicy": "lookupPolicy indicates whether this tag will handle image references in this namespace.", + "conditions": "conditions is an array of conditions that apply to the image stream tag.", + "image": "image associated with the ImageStream and tag.", +} + +func (ImageStreamTag) SwaggerDoc() map[string]string { + return map_ImageStreamTag +} + +var map_ImageStreamTagList = map[string]string{ + "": "ImageStreamTagList is a list of ImageStreamTag objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is the list of image stream tags", +} + +func (ImageStreamTagList) SwaggerDoc() map[string]string { + return map_ImageStreamTagList +} + +var map_ImageTag = map[string]string{ + "": "ImageTag represents a single tag within an image stream and includes the spec, the status history, and the currently referenced image (if any) of the provided tag. This type replaces the ImageStreamTag by providing a full view of the tag. ImageTags are returned for every spec or status tag present on the image stream. If no tag exists in either form a not found error will be returned by the API. A create operation will succeed if no spec tag has already been defined and the spec field is set. Delete will remove both spec and status elements from the image stream.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the spec tag associated with this image stream tag, and it may be null if only pushes have occurred to this image stream.", + "status": "status is the status tag details associated with this image stream tag, and it may be null if no push or import has been performed.", + "image": "image is the details of the most recent image stream status tag, and it may be null if import has not completed or an administrator has deleted the image object. To verify this is the most recent image, you must verify the generation of the most recent status.items entry matches the spec tag (if a spec tag is set). This field will not be set when listing image tags.", +} + +func (ImageTag) SwaggerDoc() map[string]string { + return map_ImageTag +} + +var map_ImageTagList = map[string]string{ + "": "ImageTagList is a list of ImageTag objects. When listing image tags, the image field is not populated. Tags are returned in alphabetical order by image stream and then tag.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is the list of image stream tags", +} + +func (ImageTagList) SwaggerDoc() map[string]string { + return map_ImageTagList +} + +var map_NamedTagEventList = map[string]string{ + "": "NamedTagEventList relates a tag to its image history.", + "tag": "Tag is the tag for which the history is recorded", + "items": "Items is a list of tag events recorded for this tag.", + "conditions": "Conditions is an array of conditions that apply to the tag event list.", +} + +func (NamedTagEventList) SwaggerDoc() map[string]string { + return map_NamedTagEventList +} + +var map_RepositoryImportSpec = map[string]string{ + "": "RepositoryImportSpec describes a request to import images from a container image repository.", + "from": "From is the source for the image repository to import; only kind DockerImage and a name of a container image repository is allowed", + "importPolicy": "ImportPolicy is the policy controlling how the image is imported", + "referencePolicy": "ReferencePolicy defines how other components should consume the image", + "includeManifest": "IncludeManifest determines if the manifest for each image is returned in the response", +} + +func (RepositoryImportSpec) SwaggerDoc() map[string]string { + return map_RepositoryImportSpec +} + +var map_RepositoryImportStatus = map[string]string{ + "": "RepositoryImportStatus describes the result of an image repository import", + "status": "Status reflects whether any failure occurred during import", + "images": "Images is a list of images successfully retrieved by the import of the repository.", + "additionalTags": "AdditionalTags are tags that exist in the repository but were not imported because a maximum limit of automatic imports was applied.", +} + +func (RepositoryImportStatus) SwaggerDoc() map[string]string { + return map_RepositoryImportStatus +} + +var map_SignatureCondition = map[string]string{ + "": "SignatureCondition describes an image signature condition of particular kind at particular probe time.", + "type": "Type of signature condition, Complete or Failed.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastProbeTime": "Last time the condition was checked.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "(brief) reason for the condition's last transition.", + "message": "Human readable message indicating details about last transition.", +} + +func (SignatureCondition) SwaggerDoc() map[string]string { + return map_SignatureCondition +} + +var map_SignatureGenericEntity = map[string]string{ + "": "SignatureGenericEntity holds generic information about a person or entity who is an issuer or a subject of signing certificate or key.", + "organization": "Organization name.", + "commonName": "Common name (e.g. openshift-signing-service).", +} + +func (SignatureGenericEntity) SwaggerDoc() map[string]string { + return map_SignatureGenericEntity +} + +var map_SignatureIssuer = map[string]string{ + "": "SignatureIssuer holds information about an issuer of signing certificate or key.", +} + +func (SignatureIssuer) SwaggerDoc() map[string]string { + return map_SignatureIssuer +} + +var map_SignatureSubject = map[string]string{ + "": "SignatureSubject holds information about a person or entity who created the signature.", + "publicKeyID": "If present, it is a human readable key id of public key belonging to the subject used to verify image signature.
It should contain at least 64 lowest bits of public key's fingerprint (e.g. 0x685ebe62bf278440).", +} + +func (SignatureSubject) SwaggerDoc() map[string]string { + return map_SignatureSubject +} + +var map_TagEvent = map[string]string{ + "": "TagEvent is used by ImageStreamStatus to keep a historical record of images associated with a tag.", + "created": "Created holds the time the TagEvent was created", + "dockerImageReference": "DockerImageReference is the string that can be used to pull this image", + "image": "Image is the image", + "generation": "Generation is the spec tag generation that resulted in this tag being updated", +} + +func (TagEvent) SwaggerDoc() map[string]string { + return map_TagEvent +} + +var map_TagEventCondition = map[string]string{ + "": "TagEventCondition contains condition information for a tag event.", + "type": "Type of tag event condition, currently only ImportSuccess", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "LastTransitionTime is the time the condition transitioned from one status to another.", + "reason": "Reason is a brief machine readable explanation for the condition's last transition.", + "message": "Message is a human readable description of the details about last transition, complementing reason.", + "generation": "Generation is the spec tag generation that this status corresponds to", +} + +func (TagEventCondition) SwaggerDoc() map[string]string { + return map_TagEventCondition +} + +var map_TagImportPolicy = map[string]string{ + "": "TagImportPolicy controls how images related to this tag will be imported.", + "insecure": "Insecure is true if the server may bypass certificate verification or connect directly over HTTP during image import.", + "scheduled": "Scheduled indicates to the server that this tag should be periodically checked to ensure it is up to date, and imported", + "importMode": "ImportMode describes how to import an image manifest.", +} + +func (TagImportPolicy) SwaggerDoc() map[string]string { + return map_TagImportPolicy +} + +var map_TagReference = map[string]string{ + "": "TagReference specifies optional annotations for images using this tag and an optional reference to an ImageStreamTag, ImageStreamImage, or DockerImage this tag should track.", + "name": "Name of the tag", + "annotations": "Optional; if specified, annotations that are applied to images retrieved via ImageStreamTags.", + "from": "Optional; if specified, a reference to another image that this tag should point to. Valid values are ImageStreamTag, ImageStreamImage, and DockerImage. ImageStreamTag references can only reference a tag within this same ImageStream.", + "reference": "Reference states if the tag will be imported. Default value is false, which means the tag will be imported.", + "generation": "Generation is a counter that tracks mutations to the spec tag (user intent). When a tag reference is changed the generation is set to match the current stream generation (which is incremented every time spec is changed). Other processes in the system like the image importer observe that the generation of spec tag is newer than the generation recorded in the status and use that as a trigger to import the newest remote tag. To trigger a new import, clients may set this value to zero which will reset the generation to the latest stream generation.
Legacy clients will send this value as nil which will be merged with the current tag generation.", + "importPolicy": "ImportPolicy is information that controls how images may be imported by the server.", + "referencePolicy": "ReferencePolicy defines how other components should consume the image.", +} + +func (TagReference) SwaggerDoc() map[string]string { + return map_TagReference +} + +var map_TagReferencePolicy = map[string]string{ + "": "TagReferencePolicy describes how pull-specs for images in this image stream tag are generated when image change triggers in deployment configs or builds are resolved. This allows the image stream author to control how images are accessed.", + "type": "Type determines how the image pull spec should be transformed when the image stream tag is used in deployment config triggers or new builds. The default value is `Source`, indicating the original location of the image should be used (if imported). The user may also specify `Local`, indicating that the pull spec should point to the integrated container image registry and leverage the registry's ability to proxy the pull to an upstream registry. `Local` allows the credentials used to pull this image to be managed from the image stream's namespace, so others on the platform can access a remote image but have no access to the remote secret. It also allows the image layers to be mirrored into the local registry which the images can still be pulled even if the upstream registry is unavailable.", +} + +func (TagReferencePolicy) SwaggerDoc() map[string]string { + return map_TagReferencePolicy +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/imageregistry/.codegen.yaml b/vendor/github.com/openshift/api/imageregistry/.codegen.yaml new file mode 100644 index 000000000..ffa2c8d9b --- /dev/null +++ b/vendor/github.com/openshift/api/imageregistry/.codegen.yaml @@ -0,0 +1,2 @@ +swaggerdocs: + commentPolicy: Warn diff --git a/vendor/github.com/openshift/api/imageregistry/install.go b/vendor/github.com/openshift/api/imageregistry/install.go new file mode 100644 index 000000000..4536c8f40 --- /dev/null +++ b/vendor/github.com/openshift/api/imageregistry/install.go @@ -0,0 +1,26 @@ +package imageregistry + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + imageregistryv1 "github.com/openshift/api/imageregistry/v1" +) + +const ( + GroupName = "imageregistry.operator.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(imageregistryv1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/imageregistry/v1/00_imageregistry.crd.yaml b/vendor/github.com/openshift/api/imageregistry/v1/00_imageregistry.crd.yaml new file mode 100644 index 000000000..e2406e37c --- /dev/null +++ b/vendor/github.com/openshift/api/imageregistry/v1/00_imageregistry.crd.yaml @@ -0,0 +1,1263 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/519 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + 
include.release.openshift.io/single-node-developer: "true" + name: configs.imageregistry.operator.openshift.io +spec: + group: imageregistry.operator.openshift.io + names: + kind: Config + listKind: ConfigList + plural: configs + singular: config + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Config is the configuration object for a registry instance managed by the registry operator \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ImageRegistrySpec defines the specs for the running registry. + properties: + affinity: + description: affinity is a group of node affinity scheduling rules for the image registry pod(s). + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
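+                                  # Illustrative sketch only (the label key is a hypothetical, conventional
+                                  # infra-node label): a node selector requirement conforming to this
+                                  # schema could look like:
+                                  #   matchExpressions:
+                                  #   - key: node-role.kubernetes.io/infra
+                                  #     operator: Exists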
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. 
If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + x-kubernetes-map-type: atomic + type: array + required: + - nodeSelectorTerms + type: object + x-kubernetes-map-type: atomic + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. 
The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
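+                              # Illustrative sketch only (the app label value is hypothetical): a hard
+                              # pod-affinity term built from this labelSelector schema could read:
+                              #   requiredDuringSchedulingIgnoredDuringExecution:
+                              #   - labelSelector:
+                              #       matchLabels:
+                              #         app: image-registry
+                              #     topologyKey: kubernetes.io/hostname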
+ properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. 
avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. 
This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key <topologyKey> matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
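+                              # Illustrative sketch only (label values are hypothetical): the same
+                              # term shape under podAntiAffinity is commonly used to keep registry
+                              # replicas on separate nodes, e.g.:
+                              #   requiredDuringSchedulingIgnoredDuringExecution:
+                              #   - labelSelector:
+                              #       matchLabels:
+                              #         docker-registry: default
+                              #     topologyKey: kubernetes.io/hostname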
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + defaultRoute: + description: defaultRoute indicates whether an external facing route for the registry should be created using the default generated hostname. + type: boolean + disableRedirect: + description: disableRedirect controls whether to route all data through the Registry, rather than redirecting to the backend. + type: boolean + httpSecret: + description: httpSecret is the value needed by the registry to secure uploads, generated by default. + type: string + logLevel: + default: Normal + description: "logLevel is an intent based logging for an overall component. 
It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + logging: + description: logging is deprecated, use logLevel instead. + format: int64 + type: integer + managementState: + description: managementState indicates whether and how the operator should manage the component + pattern: ^(Managed|Unmanaged|Force|Removed)$ + type: string + nodeSelector: + additionalProperties: + type: string + description: nodeSelector defines the node selection constraints for the registry pod. + type: object + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + nullable: true + type: object + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + default: Normal + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + type: string + proxy: + description: proxy defines the proxy to be used when calling master api, upstream registries, etc. + properties: + http: + description: http defines the proxy to be used by the image registry when accessing HTTP endpoints. + type: string + https: + description: https defines the proxy to be used by the image registry when accessing HTTPS endpoints. + type: string + noProxy: + description: noProxy defines a comma-separated list of host names that shouldn't go through any proxy. + type: string + type: object + readOnly: + description: readOnly indicates whether the registry instance should reject attempts to push new images or delete existing ones. + type: boolean + replicas: + description: replicas determines the number of registry instances to run. + format: int32 + type: integer + requests: + description: requests controls how many parallel requests a given registry instance will handle before queuing additional requests. + properties: + read: + description: read defines limits for image registry's reads. + properties: + maxInQueue: + description: maxInQueue sets the maximum queued api requests to the registry. + type: integer + maxRunning: + description: maxRunning sets the maximum in flight api requests to the registry. + type: integer + maxWaitInQueue: + description: maxWaitInQueue sets the maximum time a request can wait in the queue before being rejected. + format: duration + type: string + type: object + write: + description: write defines limits for image registry's writes. + properties: + maxInQueue: + description: maxInQueue sets the maximum queued api requests to the registry. + type: integer + maxRunning: + description: maxRunning sets the maximum in flight api requests to the registry. + type: integer + maxWaitInQueue: + description: maxWaitInQueue sets the maximum time a request can wait in the queue before being rejected. + format: duration + type: string + type: object + type: object + resources: + description: resources defines the resource requests+limits for the registry pod. 
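+                # Illustrative sketch only (quantities are hypothetical): a spec.resources
+                # stanza for the registry pod conforming to this schema could be:
+                #   resources:
+                #     requests:
+                #       cpu: 100m
+                #       memory: 256Mi
+                #     limits:
+                #       memory: 1Gi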
+ properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + rolloutStrategy: + description: rolloutStrategy defines rollout strategy for the image registry deployment. + pattern: ^(RollingUpdate|Recreate)$ + type: string + routes: + description: routes defines additional external facing routes which should be created for the registry. + items: + description: ImageRegistryConfigRoute holds information on external route access to image registry. + properties: + hostname: + description: hostname for the route. + type: string + name: + description: name of the route to be created. + type: string + secretName: + description: secretName points to secret containing the certificates to be used by the route. + type: string + required: + - name + type: object + type: array + storage: + description: storage details for configuring registry storage, e.g. S3 bucket coordinates. + properties: + azure: + description: azure represents configuration that uses Azure Blob Storage. + properties: + accountName: + description: accountName defines the account to be used by the registry. + type: string + cloudName: + description: cloudName is the name of the Azure cloud environment to be used by the registry. If empty, the operator will set it based on the infrastructure object. + type: string + container: + description: container defines Azure's container to be used by registry. + maxLength: 63 + minLength: 3 + pattern: ^[0-9a-z]+(-[0-9a-z]+)*$ + type: string + type: object + emptyDir: + description: 'emptyDir represents ephemeral storage on the pod''s host node. WARNING: this storage cannot be used with more than 1 replica and is not suitable for production use. When the pod is removed from a node for any reason, the data in the emptyDir is deleted forever.' 
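+                    # Illustrative sketch only: because emptyDir takes no fields, selecting
+                    # ephemeral storage for a throwaway, non-production cluster is just:
+                    #   storage:
+                    #     emptyDir: {}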
+ type: object + gcs: + description: gcs represents configuration that uses Google Cloud Storage. + properties: + bucket: + description: bucket is the bucket name in which you want to store the registry's data. Optional, will be generated if not provided. + type: string + keyID: + description: keyID is the KMS key ID to use for encryption. Optional, buckets are encrypted by default on GCP. This allows for the use of a custom encryption key. + type: string + projectID: + description: projectID is the Project ID of the GCP project that this bucket should be associated with. + type: string + region: + description: region is the GCS location in which your bucket exists. Optional, will be set based on the installed GCS Region. + type: string + type: object + ibmcos: + description: ibmcos represents configuration that uses IBM Cloud Object Storage. + properties: + bucket: + description: bucket is the bucket name in which you want to store the registry's data. Optional, will be generated if not provided. + type: string + location: + description: location is the IBM Cloud location in which your bucket exists. Optional, will be set based on the installed IBM Cloud location. + type: string + resourceGroupName: + description: resourceGroupName is the name of the IBM Cloud resource group that this bucket and its service instance is associated with. Optional, will be set based on the installed IBM Cloud resource group. + type: string + resourceKeyCRN: + description: resourceKeyCRN is the CRN of the IBM Cloud resource key that is created for the service instance. Commonly referred as a service credential and must contain HMAC type credentials. Optional, will be computed if not provided. + pattern: ^crn:.+:.+:.+:cloud-object-storage:.+:.+:.+:resource-key:.+$ + type: string + serviceInstanceCRN: + description: serviceInstanceCRN is the CRN of the IBM Cloud Object Storage service instance that this bucket is associated with. Optional, will be computed if not provided. + pattern: ^crn:.+:.+:.+:cloud-object-storage:.+:.+:.+::$ + type: string + type: object + managementState: + description: managementState indicates if the operator manages the underlying storage unit. If Managed the operator will remove the storage when this operator gets Removed. + pattern: ^(Managed|Unmanaged)$ + type: string + oss: + description: Oss represents configuration that uses Alibaba Cloud Object Storage Service. + properties: + bucket: + description: Bucket is the bucket name in which you want to store the registry's data. About Bucket naming, more details you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/257087.htm) Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default will be autogenerated in the form of <clusterid>-image-registry-<region>-<random string> + maxLength: 63 + minLength: 3 + pattern: ^[0-9a-z]+(-[0-9a-z]+)*$ + type: string + encryption: + anyOf: + - not: + required: + - kms + properties: + method: + not: + enum: + - KMS + - properties: + method: + enum: + - KMS + required: + - kms + description: Encryption specifies whether you would like your data encrypted on the server side.
More details, you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/117914.htm) + properties: + kms: + description: KMS (key management service) is an encryption type that holds the struct for KMS KeyID + properties: + keyID: + description: KeyID holds the KMS encryption key ID + minLength: 1 + type: string + required: + - keyID + type: object + method: + default: AES256 + description: Method defines the different encryption modes available. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is `AES256`. + enum: + - KMS + - AES256 + type: string + type: object + endpointAccessibility: + default: Internal + description: EndpointAccessibility specifies whether the registry uses the OSS VPC internal endpoint. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is `Internal`. + enum: + - Internal + - Public + - "" + type: string + region: + description: Region is the Alibaba Cloud Region in which your bucket exists. For a list of regions, you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/31837.html). Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default will be based on the installed Alibaba Cloud Region. + type: string + type: object + pvc: + description: pvc represents configuration that uses a PersistentVolumeClaim. + properties: + claim: + description: claim defines the Persistent Volume Claim's name to be used. + type: string + type: object + s3: + description: s3 represents configuration that uses Amazon Simple Storage Service. + properties: + bucket: + description: bucket is the bucket name in which you want to store the registry's data. Optional, will be generated if not provided. + type: string + cloudFront: + description: cloudFront configures Amazon Cloudfront as the storage middleware in a registry. + properties: + baseURL: + description: baseURL contains the SCHEME://HOST[/PATH] at which Cloudfront is served. + type: string + duration: + description: duration is the duration of the Cloudfront session. + format: duration + type: string + keypairID: + description: keypairID is key pair ID provided by AWS. + type: string + privateKey: + description: privateKey points to secret containing the private key, provided by AWS. + properties: + key: + description: The key of the secret to select from. Must be a valid secret key. + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?' + type: string + optional: + description: Specify whether the Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + required: + - baseURL + - keypairID + - privateKey + type: object + encrypt: + description: encrypt specifies whether the registry stores the image in encrypted format or not. Optional, defaults to false. + type: boolean + keyID: + description: keyID is the KMS key ID to use for encryption. Optional, Encrypt must be true, or this parameter is ignored. + type: string + region: + description: region is the AWS region in which your bucket exists. Optional, will be set based on the installed AWS Region.
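+                    # Illustrative sketch only (bucket and region values are hypothetical):
+                    # a minimal spec.storage.s3 stanza conforming to this schema could be:
+                    #   storage:
+                    #     s3:
+                    #       bucket: my-registry-bucket
+                    #       region: us-east-1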
+              type: string
+            regionEndpoint:
+              description: regionEndpoint is the endpoint for S3 compatible storage services. It should be a valid URL with scheme, e.g. https://s3.example.com. Optional, defaults based on the Region that is provided.
+              type: string
+            trustedCA:
+              description: "trustedCA is a reference to a config map containing a CA bundle. The image registry and its operator use certificates from this bundle to verify S3 server certificates. \n The namespace for the config map referenced by trustedCA is \"openshift-config\". The key for the bundle in the config map is \"ca-bundle.crt\"."
+              properties:
+                name:
+                  description: name is the metadata.name of the referenced config map. This field must adhere to standard config map naming restrictions. The name must consist solely of alphanumeric characters, hyphens (-) and periods (.). It has a maximum length of 253 characters. If this field is not specified or is empty string, the default trust bundle will be used.
+                  maxLength: 253
+                  pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+                  type: string
+              type: object
+            virtualHostedStyle:
+              description: virtualHostedStyle enables using S3 virtual hosted style bucket paths with a custom RegionEndpoint. Optional, defaults to false.
+              type: boolean
+          type: object
+        swift:
+          description: swift represents configuration that uses OpenStack Object Storage.
+          properties:
+            authURL:
+              description: authURL defines the URL for obtaining an authentication token.
+              type: string
+            authVersion:
+              description: authVersion specifies the OpenStack Auth's version.
+              type: string
+            container:
+              description: container defines the name of the Swift container in which to store the registry's data.
+              type: string
+            domain:
+              description: domain specifies OpenStack's domain name for Identity v3 API.
+              type: string
+            domainID:
+              description: domainID specifies OpenStack's domain id for Identity v3 API.
+              type: string
+            regionName:
+              description: regionName defines OpenStack's region in which the container exists.
+              type: string
+            tenant:
+              description: tenant defines the OpenStack tenant name to be used by the registry.
+              type: string
+            tenantID:
+              description: tenantID defines the OpenStack tenant id to be used by the registry.
+              type: string
+          type: object
+      type: object
+    tolerations:
+      description: tolerations defines the tolerations for the registry pod.
+      items:
+        description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator .
+        properties:
+          effect:
+            description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+            type: string
+          key:
+            description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+            type: string
+          operator:
+            description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+            type: string
+          tolerationSeconds:
+            description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict).
Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + topologySpreadConstraints: + description: topologySpreadConstraints specify how to spread matching pods among the given topology. + items: + description: TopologySpreadConstraint specifies how to spread matching pods among the given topology. + properties: + labelSelector: + description: LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + x-kubernetes-map-type: atomic + matchLabelKeys: + description: "MatchLabelKeys is a set of pod label keys to select the pods over which spreading will be calculated. The keys are used to lookup values from the incoming pod labels, those key-value labels are ANDed with labelSelector to select the group of existing pods over which spreading will be calculated for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector. MatchLabelKeys cannot be set when LabelSelector isn't set. Keys that don't exist in the incoming pod labels will be ignored. A null or empty list means only match against labelSelector. \n This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default)." + items: + type: string + type: array + x-kubernetes-list-type: atomic + maxSkew: + description: 'MaxSkew describes the degree to which pods may be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`, it is the maximum permitted difference between the number of matching pods in the target topology and the global minimum. The global minimum is the minimum number of matching pods in an eligible domain or zero if the number of eligible domains is less than MinDomains. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 2/2/1: In this case, the global minimum is 1. 
| zone1 | zone2 | zone3 | | P P | P P | P | - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2; scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`, it is used to give higher precedence to topologies that satisfy it. It''s a required field. Default value is 1 and 0 is not allowed.' + format: int32 + type: integer + minDomains: + description: "MinDomains indicates a minimum number of eligible domains. When the number of eligible domains with matching topology keys is less than minDomains, Pod Topology Spread treats \"global minimum\" as 0, and then the calculation of Skew is performed. And when the number of eligible domains with matching topology keys equals or greater than minDomains, this value has no effect on scheduling. As a result, when the number of eligible domains is less than minDomains, scheduler won't schedule more than maxSkew Pods to those domains. If value is nil, the constraint behaves as if MinDomains is equal to 1. Valid values are integers greater than 0. When value is not nil, WhenUnsatisfiable must be DoNotSchedule. \n For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same labelSelector spread as 2/2/2: | zone1 | zone2 | zone3 | | P P | P P | P P | The number of domains is less than 5(MinDomains), so \"global minimum\" is treated as 0. In this situation, new pod with the same labelSelector cannot be scheduled, because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones, it will violate MaxSkew. \n This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default)." + format: int32 + type: integer + nodeAffinityPolicy: + description: "NodeAffinityPolicy indicates how we will treat Pod's nodeAffinity/nodeSelector when calculating pod topology spread skew. Options are: - Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations. - Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations. \n If this value is nil, the behavior is equivalent to the Honor policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + nodeTaintsPolicy: + description: "NodeTaintsPolicy indicates how we will treat node taints when calculating pod topology spread skew. Options are: - Honor: nodes without taints, along with tainted nodes for which the incoming pod has a toleration, are included. - Ignore: node taints are ignored. All nodes are included. \n If this value is nil, the behavior is equivalent to the Ignore policy. This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag." + type: string + topologyKey: + description: TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a "bucket", and try to put balanced number of pods into each bucket. We define a domain as a particular instance of a topology. Also, we define an eligible domain as a domain whose nodes meet the requirements of nodeAffinityPolicy and nodeTaintsPolicy. e.g. If TopologyKey is "kubernetes.io/hostname", each Node is a domain of that topology. And, if TopologyKey is "topology.kubernetes.io/zone", each zone is a domain of that topology. 
+              It's a required field.
+            type: string
+          whenUnsatisfiable:
+            description: 'WhenUnsatisfiable indicates how to deal with a pod if it doesn''t satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it. - ScheduleAnyway tells the scheduler to schedule the pod in any location, but giving higher precedence to topologies that would help reduce the skew. A constraint is considered "Unsatisfiable" for an incoming pod if and only if every possible node assignment for that pod would violate "MaxSkew" on some topology. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: | zone1 | zone2 | zone3 | | P P P | P | P | If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler won''t make it *more* imbalanced. It''s a required field.'
+            type: string
+        required:
+        - maxSkew
+        - topologyKey
+        - whenUnsatisfiable
+        type: object
+      type: array
+    unsupportedConfigOverrides:
+      description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from Red Hat support before using this field. Use of this property blocks cluster upgrades; it must be removed before upgrading your cluster.
+      nullable: true
+      type: object
+      x-kubernetes-preserve-unknown-fields: true
+  required:
+  - replicas
+  type: object
+status:
+  description: ImageRegistryStatus reports image registry operational status.
+  properties:
+    conditions:
+      description: conditions is a list of conditions and their status
+      items:
+        description: OperatorCondition is just the standard condition fields.
+        properties:
+          lastTransitionTime:
+            format: date-time
+            type: string
+          message:
+            type: string
+          reason:
+            type: string
+          status:
+            type: string
+          type:
+            type: string
+        type: object
+      type: array
+    generations:
+      description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction.
+      items:
+        description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made.
+        properties:
+          group:
+            description: group is the group of the thing you're tracking
+            type: string
+          hash:
+            description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps
+            type: string
+          lastGeneration:
+            description: lastGeneration is the last generation of the workload controller involved
+            format: int64
+            type: integer
+          name:
+            description: name is the name of the thing you're tracking
+            type: string
+          namespace:
+            description: namespace is where the thing you're tracking is
+            type: string
+          resource:
+            description: resource is the resource type of the thing you're tracking
+            type: string
+        type: object
+      type: array
+    observedGeneration:
+      description: observedGeneration is the last generation change you've dealt with
+      format: int64
+      type: integer
+    readyReplicas:
+      description: readyReplicas indicates how many replicas are ready and at the desired state
+      format: int32
+      type: integer
+    storage:
+      description: storage indicates the current applied storage configuration of the registry.
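+      # Illustrative example (comment only): a registry whose storage is
+      # operator-managed S3 might report status.storage along these lines;
+      # the bucket name and region are hypothetical:
+      #   storage:
+      #     managementState: Managed
+      #     s3:
+      #       bucket: cluster-image-registry
+      #       region: us-east-1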
+      properties:
+        azure:
+          description: azure represents configuration that uses Azure Blob Storage.
+          properties:
+            accountName:
+              description: accountName defines the account to be used by the registry.
+              type: string
+            cloudName:
+              description: cloudName is the name of the Azure cloud environment to be used by the registry. If empty, the operator will set it based on the infrastructure object.
+              type: string
+            container:
+              description: container defines Azure's container to be used by the registry.
+              maxLength: 63
+              minLength: 3
+              pattern: ^[0-9a-z]+(-[0-9a-z]+)*$
+              type: string
+          type: object
+        emptyDir:
+          description: 'emptyDir represents ephemeral storage on the pod''s host node. WARNING: this storage cannot be used with more than 1 replica and is not suitable for production use. When the pod is removed from a node for any reason, the data in the emptyDir is deleted forever.'
+          type: object
+        gcs:
+          description: gcs represents configuration that uses Google Cloud Storage.
+          properties:
+            bucket:
+              description: bucket is the bucket name in which you want to store the registry's data. Optional, will be generated if not provided.
+              type: string
+            keyID:
+              description: keyID is the KMS key ID to use for encryption. Optional, buckets are encrypted by default on GCP. This allows for the use of a custom encryption key.
+              type: string
+            projectID:
+              description: projectID is the Project ID of the GCP project that this bucket should be associated with.
+              type: string
+            region:
+              description: region is the GCS location in which your bucket exists. Optional, will be set based on the installed GCS Region.
+              type: string
+          type: object
+        ibmcos:
+          description: ibmcos represents configuration that uses IBM Cloud Object Storage.
+          properties:
+            bucket:
+              description: bucket is the bucket name in which you want to store the registry's data. Optional, will be generated if not provided.
+              type: string
+            location:
+              description: location is the IBM Cloud location in which your bucket exists. Optional, will be set based on the installed IBM Cloud location.
+              type: string
+            resourceGroupName:
+              description: resourceGroupName is the name of the IBM Cloud resource group that this bucket and its service instance is associated with. Optional, will be set based on the installed IBM Cloud resource group.
+              type: string
+            resourceKeyCRN:
+              description: resourceKeyCRN is the CRN of the IBM Cloud resource key that is created for the service instance. Commonly referred to as a service credential, it must contain HMAC type credentials. Optional, will be computed if not provided.
+              pattern: ^crn:.+:.+:.+:cloud-object-storage:.+:.+:.+:resource-key:.+$
+              type: string
+            serviceInstanceCRN:
+              description: serviceInstanceCRN is the CRN of the IBM Cloud Object Storage service instance that this bucket is associated with. Optional, will be computed if not provided.
+              pattern: ^crn:.+:.+:.+:cloud-object-storage:.+:.+:.+::$
+              type: string
+          type: object
+        managementState:
+          description: managementState indicates if the operator manages the underlying storage unit. If Managed, the operator will remove the storage when this operator gets Removed.
+          pattern: ^(Managed|Unmanaged)$
+          type: string
+        oss:
+          description: Oss represents configuration that uses Alibaba Cloud Object Storage Service.
+          properties:
+            bucket:
+              description: Bucket is the bucket name in which you want to store the registry's data.
+                For more details about bucket naming, see the [official documentation](https://www.alibabacloud.com/help/doc-detail/257087.htm). Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default will be autogenerated in the form of -image-registry--
+              maxLength: 63
+              minLength: 3
+              pattern: ^[0-9a-z]+(-[0-9a-z]+)*$
+              type: string
+            encryption:
+              description: Encryption specifies whether you would like your data encrypted on the server side. For more details, see the [official documentation](https://www.alibabacloud.com/help/doc-detail/117914.htm).
+              properties:
+                kms:
+                  description: KMS (key management service) is an encryption type that holds the struct for KMS KeyID
+                  properties:
+                    keyID:
+                      description: KeyID holds the KMS encryption key ID
+                      minLength: 1
+                      type: string
+                  required:
+                  - keyID
+                  type: object
+                method:
+                  default: AES256
+                  description: Method defines the different encryption modes available. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is `AES256`.
+                  enum:
+                  - KMS
+                  - AES256
+                  type: string
+              type: object
+            endpointAccessibility:
+              default: Internal
+              description: EndpointAccessibility specifies whether the registry uses the OSS VPC internal endpoint. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is `Internal`.
+              enum:
+              - Internal
+              - Public
+              - ""
+              type: string
+            region:
+              description: Region is the Alibaba Cloud Region in which your bucket exists. For a list of regions, you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/31837.html). Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default will be based on the installed Alibaba Cloud Region.
+              type: string
+          type: object
+        pvc:
+          description: pvc represents configuration that uses a PersistentVolumeClaim.
+          properties:
+            claim:
+              description: claim defines the Persistent Volume Claim's name to be used.
+              type: string
+          type: object
+        s3:
+          description: s3 represents configuration that uses Amazon Simple Storage Service.
+          properties:
+            bucket:
+              description: bucket is the bucket name in which you want to store the registry's data. Optional, will be generated if not provided.
+              type: string
+            cloudFront:
+              description: cloudFront configures Amazon Cloudfront as the storage middleware in a registry.
+              properties:
+                baseURL:
+                  description: baseURL contains the SCHEME://HOST[/PATH] at which Cloudfront is served.
+                  type: string
+                duration:
+                  description: duration is the duration of the Cloudfront session.
+                  format: duration
+                  type: string
+                keypairID:
+                  description: keypairID is the key pair ID provided by AWS.
+                  type: string
+                privateKey:
+                  description: privateKey points to the secret containing the private key, provided by AWS.
+                  properties:
+                    key:
+                      description: The key of the secret to select from. Must be a valid secret key.
+                      type: string
+                    name:
+                      description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
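+                    # Illustrative example (comment only): a hypothetical
+                    # cloudFront stanza; the domain, key pair ID, and secret
+                    # name are all assumptions:
+                    #   cloudFront:
+                    #     baseURL: https://d1234abcdef.cloudfront.net
+                    #     keypairID: APKAEXAMPLE
+                    #     privateKey:
+                    #       name: cloudfront-private-key
+                    #       key: private.pem
+                    #     duration: 20m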
+                      type: string
+                    optional:
+                      description: Specify whether the Secret or its key must be defined
+                      type: boolean
+                  required:
+                  - key
+                  type: object
+                  x-kubernetes-map-type: atomic
+              required:
+              - baseURL
+              - keypairID
+              - privateKey
+              type: object
+            encrypt:
+              description: encrypt specifies whether the registry stores the image in encrypted format or not. Optional, defaults to false.
+              type: boolean
+            keyID:
+              description: keyID is the KMS key ID to use for encryption. Optional; Encrypt must be true, or this parameter is ignored.
+              type: string
+            region:
+              description: region is the AWS region in which your bucket exists. Optional, will be set based on the installed AWS Region.
+              type: string
+            regionEndpoint:
+              description: regionEndpoint is the endpoint for S3 compatible storage services. It should be a valid URL with scheme, e.g. https://s3.example.com. Optional, defaults based on the Region that is provided.
+              type: string
+            trustedCA:
+              description: "trustedCA is a reference to a config map containing a CA bundle. The image registry and its operator use certificates from this bundle to verify S3 server certificates. \n The namespace for the config map referenced by trustedCA is \"openshift-config\". The key for the bundle in the config map is \"ca-bundle.crt\"."
+              properties:
+                name:
+                  description: name is the metadata.name of the referenced config map. This field must adhere to standard config map naming restrictions. The name must consist solely of alphanumeric characters, hyphens (-) and periods (.). It has a maximum length of 253 characters. If this field is not specified or is empty string, the default trust bundle will be used.
+                  maxLength: 253
+                  pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$
+                  type: string
+              type: object
+            virtualHostedStyle:
+              description: virtualHostedStyle enables using S3 virtual hosted style bucket paths with a custom RegionEndpoint. Optional, defaults to false.
+              type: boolean
+          type: object
+        swift:
+          description: swift represents configuration that uses OpenStack Object Storage.
+          properties:
+            authURL:
+              description: authURL defines the URL for obtaining an authentication token.
+              type: string
+            authVersion:
+              description: authVersion specifies the OpenStack Auth's version.
+              type: string
+            container:
+              description: container defines the name of the Swift container in which to store the registry's data.
+              type: string
+            domain:
+              description: domain specifies OpenStack's domain name for Identity v3 API.
+              type: string
+            domainID:
+              description: domainID specifies OpenStack's domain id for Identity v3 API.
+              type: string
+            regionName:
+              description: regionName defines OpenStack's region in which the container exists.
+              type: string
+            tenant:
+              description: tenant defines the OpenStack tenant name to be used by the registry.
+              type: string
+            tenantID:
+              description: tenantID defines the OpenStack tenant id to be used by the registry.
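+            # Illustrative example (comment only): a hypothetical Swift
+            # configuration; every value below is an assumption:
+            #   swift:
+            #     authURL: https://keystone.example.com:5000/v3
+            #     authVersion: "3"
+            #     container: registry-data
+            #     domain: Default
+            #     regionName: RegionOne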
+ type: string + type: object + type: object + storageManaged: + description: storageManaged is deprecated, please refer to Storage.managementState + type: boolean + version: + description: version is the level this availability applies to + type: string + required: + - storage + - storageManaged + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/imageregistry/v1/00_imageregistry.crd.yaml-patch b/vendor/github.com/openshift/api/imageregistry/v1/00_imageregistry.crd.yaml-patch new file mode 100644 index 000000000..1bd29f566 --- /dev/null +++ b/vendor/github.com/openshift/api/imageregistry/v1/00_imageregistry.crd.yaml-patch @@ -0,0 +1,13 @@ +- op: add + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/storage/properties/oss/properties/encryption/anyOf + value: + - properties: + method: + not: + enum: ["KMS"] + not: + required: ["kms"] + - properties: + method: + enum: ["KMS"] + required: ["kms"] diff --git a/vendor/github.com/openshift/api/imageregistry/v1/01_imagepruner.crd.yaml b/vendor/github.com/openshift/api/imageregistry/v1/01_imagepruner.crd.yaml new file mode 100644 index 000000000..cc1dc208f --- /dev/null +++ b/vendor/github.com/openshift/api/imageregistry/v1/01_imagepruner.crd.yaml @@ -0,0 +1,644 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/555 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: imagepruners.imageregistry.operator.openshift.io +spec: + group: imageregistry.operator.openshift.io + names: + kind: ImagePruner + listKind: ImagePrunerList + plural: imagepruners + singular: imagepruner + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "ImagePruner is the configuration object for an image registry pruner managed by the registry operator. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ImagePrunerSpec defines the specs for the running image pruner. + type: object + properties: + affinity: + description: affinity is a group of node affinity scheduling rules for the image pruner pod. + type: object + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. 
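+              # Illustrative example (comment only): pinning the pruner pod to
+              # amd64 Linux nodes with a required node affinity term. The keys
+              # are standard well-known node labels; the policy itself is a
+              # hypothetical choice:
+              #   nodeAffinity:
+              #     requiredDuringSchedulingIgnoredDuringExecution:
+              #       nodeSelectorTerms:
+              #       - matchExpressions:
+              #         - key: kubernetes.io/arch
+              #           operator: In
+              #           values: ["amd64"]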
+              type: object
+              properties:
+                preferredDuringSchedulingIgnoredDuringExecution:
+                  description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
+                  type: array
+                  items:
+                    description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
+                    type: object
+                    required:
+                    - preference
+                    - weight
+                    properties:
+                      preference:
+                        description: A node selector term, associated with the corresponding weight.
+                        type: object
+                        properties:
+                          matchExpressions:
+                            description: A list of node selector requirements by node's labels.
+                            type: array
+                            items:
+                              description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+                              type: object
+                              required:
+                              - key
+                              - operator
+                              properties:
+                                key:
+                                  description: The label key that the selector applies to.
+                                  type: string
+                                operator:
+                                  description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                  type: string
+                                values:
+                                  description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+                                  type: array
+                                  items:
+                                    type: string
+                          matchFields:
+                            description: A list of node selector requirements by node's fields.
+                            type: array
+                            items:
+                              description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+                              type: object
+                              required:
+                              - key
+                              - operator
+                              properties:
+                                key:
+                                  description: The label key that the selector applies to.
+                                  type: string
+                                operator:
+                                  description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                  type: string
+                                values:
+                                  description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+                                  type: array
+                                  items:
+                                    type: string
+                        x-kubernetes-map-type: atomic
+                      weight:
+                        description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
+                        type: integer
+                        format: int32
+                requiredDuringSchedulingIgnoredDuringExecution:
+                  description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g.
+                    due to an update), the system may or may not try to eventually evict the pod from its node.
+                  type: object
+                  required:
+                  - nodeSelectorTerms
+                  properties:
+                    nodeSelectorTerms:
+                      description: Required. A list of node selector terms. The terms are ORed.
+                      type: array
+                      items:
+                        description: A null or empty node selector term matches no objects. Their requirements are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+                        type: object
+                        properties:
+                          matchExpressions:
+                            description: A list of node selector requirements by node's labels.
+                            type: array
+                            items:
+                              description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+                              type: object
+                              required:
+                              - key
+                              - operator
+                              properties:
+                                key:
+                                  description: The label key that the selector applies to.
+                                  type: string
+                                operator:
+                                  description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                  type: string
+                                values:
+                                  description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+                                  type: array
+                                  items:
+                                    type: string
+                          matchFields:
+                            description: A list of node selector requirements by node's fields.
+                            type: array
+                            items:
+                              description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+                              type: object
+                              required:
+                              - key
+                              - operator
+                              properties:
+                                key:
+                                  description: The label key that the selector applies to.
+                                  type: string
+                                operator:
+                                  description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+                                  type: string
+                                values:
+                                  description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
+                                  type: array
+                                  items:
+                                    type: string
+                        x-kubernetes-map-type: atomic
+                  x-kubernetes-map-type: atomic
+            podAffinity:
+              description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
+              type: object
+              properties:
+                preferredDuringSchedulingIgnoredDuringExecution:
+                  description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
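+                  # Illustrative example (comment only): a soft preference to
+                  # co-locate the pruner with pods carrying a hypothetical
+                  # app=image-registry label:
+                  #   preferredDuringSchedulingIgnoredDuringExecution:
+                  #   - weight: 50
+                  #     podAffinityTerm:
+                  #       topologyKey: kubernetes.io/hostname
+                  #       labelSelector:
+                  #         matchLabels:
+                  #           app: image-registry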
+ type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. 
The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. 
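+                        # Illustrative example (comment only): scoping the term
+                        # to namespaces carrying a hypothetical env=prod label,
+                        # in addition to any literal "namespaces" entries:
+                        #   namespaceSelector:
+                        #     matchLabels:
+                        #       env: prod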
+                        type: object
+                        properties:
+                          matchExpressions:
+                            description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+                            type: array
+                            items:
+                              description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+                              type: object
+                              required:
+                              - key
+                              - operator
+                              properties:
+                                key:
+                                  description: key is the label key that the selector applies to.
+                                  type: string
+                                operator:
+                                  description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+                                  type: string
+                                values:
+                                  description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+                                  type: array
+                                  items:
+                                    type: string
+                          matchLabels:
+                            description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+                            type: object
+                            additionalProperties:
+                              type: string
+                        x-kubernetes-map-type: atomic
+                      namespaces:
+                        description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace".
+                        type: array
+                        items:
+                          type: string
+                      topologyKey:
+                        description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
+                        type: string
+            podAntiAffinity:
+              description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
+              type: object
+              properties:
+                preferredDuringSchedulingIgnoredDuringExecution:
+                  description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which match the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
+                  type: array
+                  items:
+                    description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+                    type: object
+                    required:
+                    - podAffinityTerm
+                    - weight
+                    properties:
+                      podAffinityTerm:
+                        description: Required. A pod affinity term, associated with the corresponding weight.
+                        type: object
+                        required:
+                        - topologyKey
+                        properties:
+                          labelSelector:
+                            description: A label query over a set of resources, in this case pods.
+                            type: object
+                            properties:
+                              matchExpressions:
+                                description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+ type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
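+                          # Illustrative example (comment only): spreading pruner
+                          # pods apart by disfavoring co-location with pods that
+                          # carry a hypothetical app=image-pruner label:
+                          #   podAntiAffinity:
+                          #     preferredDuringSchedulingIgnoredDuringExecution:
+                          #     - weight: 100
+                          #       podAffinityTerm:
+                          #         topologyKey: kubernetes.io/hostname
+                          #         labelSelector:
+                          #           matchLabels:
+                          #             app: image-pruner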
+ type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. 
+ type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + failedJobsHistoryLimit: + description: failedJobsHistoryLimit specifies how many failed image pruner jobs to retain. Defaults to 3 if not set. + type: integer + format: int32 + ignoreInvalidImageReferences: + description: ignoreInvalidImageReferences indicates whether the pruner can ignore errors while parsing image references. + type: boolean + keepTagRevisions: + description: keepTagRevisions specifies the number of image revisions for a tag in an image stream that will be preserved. Defaults to 3. + type: integer + keepYoungerThan: + description: 'keepYoungerThan specifies the minimum age in nanoseconds of an image and its referrers for it to be considered a candidate for pruning. DEPRECATED: This field is deprecated in favor of keepYoungerThanDuration. If both are set, this field is ignored and keepYoungerThanDuration takes precedence.' + type: integer + format: int64 + keepYoungerThanDuration: + description: keepYoungerThanDuration specifies the minimum age of an image and its referrers for it to be considered a candidate for pruning. Defaults to 60m (60 minutes). + type: string + format: duration + logLevel: + description: "logLevel sets the level of log output for the pruner job. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + nodeSelector: + description: nodeSelector defines the node selection constraints for the image pruner pod. + type: object + additionalProperties: + type: string + resources: + description: resources defines the resource requests and limits for the image pruner pod. + type: object + properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." + type: array + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. 
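+            # Illustrative example (comment only): a modest, hypothetical
+            # resource stanza for the pruner job; the quantities must match
+            # the pattern enforced on limits/requests below:
+            #   resources:
+            #     requests:
+            #       cpu: 100m
+            #       memory: 256Mi
+            #     limits:
+            #       memory: 512Mi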
+ type: object + required: + - name + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + additionalProperties: + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + requests: + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + additionalProperties: + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + schedule: + description: 'schedule specifies when to execute the job using standard cronjob syntax: https://wikipedia.org/wiki/Cron. Defaults to `0 0 * * *`.' + type: string + successfulJobsHistoryLimit: + description: successfulJobsHistoryLimit specifies how many successful image pruner jobs to retain. Defaults to 3 if not set. + type: integer + format: int32 + suspend: + description: suspend specifies whether or not to suspend subsequent executions of this cronjob. Defaults to false. + type: boolean + tolerations: + description: tolerations defines the node tolerations for the image pruner pod. + type: array + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + type: object + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + status: + description: ImagePrunerStatus reports image pruner operational status. 
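+      # Illustrative example (comment only): pulling the spec fields above
+      # together, a hypothetical pruner that runs nightly at 02:00, keeps 5
+      # tag revisions, and spares images younger than 24 hours:
+      #   spec:
+      #     schedule: "0 2 * * *"
+      #     keepTagRevisions: 5
+      #     keepYoungerThanDuration: 24h
+      #     suspend: false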
+ type: object + properties: + conditions: + description: conditions is a list of conditions and their status. + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + observedGeneration: + description: observedGeneration is the last generation change that has been applied. + type: integer + format: int64 + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/imageregistry/v1/Makefile b/vendor/github.com/openshift/api/imageregistry/v1/Makefile new file mode 100644 index 000000000..ecef2e270 --- /dev/null +++ b/vendor/github.com/openshift/api/imageregistry/v1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="imageregistry.operator.openshift.io/v1" diff --git a/vendor/github.com/openshift/api/imageregistry/v1/doc.go b/vendor/github.com/openshift/api/imageregistry/v1/doc.go new file mode 100644 index 000000000..32ad6f814 --- /dev/null +++ b/vendor/github.com/openshift/api/imageregistry/v1/doc.go @@ -0,0 +1,3 @@ +// +k8s:deepcopy-gen=package +// +groupName=imageregistry.operator.openshift.io +package v1 diff --git a/vendor/github.com/openshift/api/imageregistry/v1/register.go b/vendor/github.com/openshift/api/imageregistry/v1/register.go new file mode 100644 index 000000000..b5f708c1b --- /dev/null +++ b/vendor/github.com/openshift/api/imageregistry/v1/register.go @@ -0,0 +1,48 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + version = "v1" + groupName = "imageregistry.operator.openshift.io" +) + +var ( + scheme = runtime.NewScheme() + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + GroupVersion = schema.GroupVersion{Group: groupName, Version: version} + // Install is a function which adds this version to a scheme + Install = SchemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = SchemeBuilder.AddToScheme +) + +func init() { + AddToScheme(scheme) +} + +// addKnownTypes adds the set of types defined in this package to the supplied scheme. 
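+//
+// As an illustrative sketch, not part of the upstream file: callers normally
+// register these types through the exported Install helper rather than by
+// calling addKnownTypes directly; the error handling shown is hypothetical:
+//
+//	s := runtime.NewScheme()
+//	if err := Install(s); err != nil {
+//		panic(err)
+//	}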
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &Config{},
+ &ConfigList{},
+ &ImagePruner{},
+ &ImagePrunerList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
diff --git a/vendor/github.com/openshift/api/imageregistry/v1/stable.config.testsuite.yaml b/vendor/github.com/openshift/api/imageregistry/v1/stable.config.testsuite.yaml
new file mode 100644
index 000000000..85a6b45d5
--- /dev/null
+++ b/vendor/github.com/openshift/api/imageregistry/v1/stable.config.testsuite.yaml
@@ -0,0 +1,18 @@
+apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
+name: "[Stable] Config"
+crd: 00_imageregistry.crd.yaml
+tests:
+ onCreate:
+ - name: Should be able to create a minimal Config
+ initial: |
+ apiVersion: imageregistry.operator.openshift.io/v1
+ kind: Config
+ spec:
+ replicas: 1
+ expected: |
+ apiVersion: imageregistry.operator.openshift.io/v1
+ kind: Config
+ spec:
+ logLevel: Normal
+ operatorLogLevel: Normal
+ replicas: 1
diff --git a/vendor/github.com/openshift/api/imageregistry/v1/stable.imagepruner.testsuite.yaml b/vendor/github.com/openshift/api/imageregistry/v1/stable.imagepruner.testsuite.yaml
new file mode 100644
index 000000000..53c055cb2
--- /dev/null
+++ b/vendor/github.com/openshift/api/imageregistry/v1/stable.imagepruner.testsuite.yaml
@@ -0,0 +1,15 @@
+apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
+name: "[Stable] ImagePruner"
+crd: 01_imagepruner.crd.yaml
+tests:
+ onCreate:
+ - name: Should be able to create a minimal ImagePruner
+ initial: |
+ apiVersion: imageregistry.operator.openshift.io/v1
+ kind: ImagePruner
+ spec: {} # No spec is required for an ImagePruner
+ expected: |
+ apiVersion: imageregistry.operator.openshift.io/v1
+ kind: ImagePruner
+ spec:
+ logLevel: Normal
diff --git a/vendor/github.com/openshift/api/imageregistry/v1/types.go b/vendor/github.com/openshift/api/imageregistry/v1/types.go
new file mode 100644
index 000000000..e9c0ca309
--- /dev/null
+++ b/vendor/github.com/openshift/api/imageregistry/v1/types.go
@@ -0,0 +1,489 @@
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ operatorv1 "github.com/openshift/api/operator/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ConfigList is a slice of Config objects.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ConfigList struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ metav1.ListMeta `json:"metadata"`
+ Items []Config `json:"items"`
+}
+
+const (
+ // StorageManagementStateManaged indicates the operator is managing the underlying storage.
+ StorageManagementStateManaged = "Managed"
+ // StorageManagementStateUnmanaged indicates the operator is not managing the underlying
+ // storage.
+ StorageManagementStateUnmanaged = "Unmanaged" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Config is the configuration object for a registry instance managed by +// the registry operator +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Config struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + Spec ImageRegistrySpec `json:"spec"` + // +optional + Status ImageRegistryStatus `json:"status,omitempty"` +} + +// ImageRegistrySpec defines the specs for the running registry. +type ImageRegistrySpec struct { + // operatorSpec allows operator specific configuration to be made. + operatorv1.OperatorSpec `json:",inline"` + // httpSecret is the value needed by the registry to secure uploads, generated by default. + // +optional + HTTPSecret string `json:"httpSecret,omitempty"` + // proxy defines the proxy to be used when calling master api, upstream + // registries, etc. + // +optional + Proxy ImageRegistryConfigProxy `json:"proxy,omitempty"` + // storage details for configuring registry storage, e.g. S3 bucket + // coordinates. + // +optional + Storage ImageRegistryConfigStorage `json:"storage,omitempty"` + // readOnly indicates whether the registry instance should reject attempts + // to push new images or delete existing ones. + // +optional + ReadOnly bool `json:"readOnly,omitempty"` + // disableRedirect controls whether to route all data through the Registry, + // rather than redirecting to the backend. + // +optional + DisableRedirect bool `json:"disableRedirect,omitempty"` + // requests controls how many parallel requests a given registry instance + // will handle before queuing additional requests. + // +optional + Requests ImageRegistryConfigRequests `json:"requests,omitempty"` + // defaultRoute indicates whether an external facing route for the registry + // should be created using the default generated hostname. + // +optional + DefaultRoute bool `json:"defaultRoute,omitempty"` + // routes defines additional external facing routes which should be + // created for the registry. + // +optional + Routes []ImageRegistryConfigRoute `json:"routes,omitempty"` + // replicas determines the number of registry instances to run. + Replicas int32 `json:"replicas"` + // logging is deprecated, use logLevel instead. + // +optional + Logging int64 `json:"logging,omitempty"` + // resources defines the resource requests+limits for the registry pod. + // +optional + Resources *corev1.ResourceRequirements `json:"resources,omitempty"` + // nodeSelector defines the node selection constraints for the registry + // pod. + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // tolerations defines the tolerations for the registry pod. + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + // rolloutStrategy defines rollout strategy for the image registry + // deployment. + // +optional + // +kubebuilder:validation:Pattern=`^(RollingUpdate|Recreate)$` + RolloutStrategy string `json:"rolloutStrategy,omitempty"` + // affinity is a group of node affinity scheduling rules for the image registry pod(s). 
+ // +optional
+ Affinity *corev1.Affinity `json:"affinity,omitempty"`
+ // topologySpreadConstraints specify how to spread matching pods among the given topology.
+ // +optional
+ TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"`
+}
+
+// ImageRegistryStatus reports image registry operational status.
+type ImageRegistryStatus struct {
+ operatorv1.OperatorStatus `json:",inline"`
+
+ // storageManaged is deprecated, please refer to Storage.managementState
+ StorageManaged bool `json:"storageManaged"`
+ // storage indicates the current applied storage configuration of the
+ // registry.
+ Storage ImageRegistryConfigStorage `json:"storage"`
+}
+
+// ImageRegistryConfigProxy defines proxy configuration to be used by registry.
+type ImageRegistryConfigProxy struct {
+ // http defines the proxy to be used by the image registry when
+ // accessing HTTP endpoints.
+ // +optional
+ HTTP string `json:"http,omitempty"`
+ // https defines the proxy to be used by the image registry when
+ // accessing HTTPS endpoints.
+ // +optional
+ HTTPS string `json:"https,omitempty"`
+ // noProxy defines a comma-separated list of host names that shouldn't
+ // go through any proxy.
+ // +optional
+ NoProxy string `json:"noProxy,omitempty"`
+}
+
+// ImageRegistryConfigStorageS3CloudFront holds the configuration
+// to use Amazon Cloudfront as the storage middleware in a registry.
+// https://docs.docker.com/registry/configuration/#cloudfront
+type ImageRegistryConfigStorageS3CloudFront struct {
+ // baseURL contains the SCHEME://HOST[/PATH] at which Cloudfront is served.
+ BaseURL string `json:"baseURL"`
+ // privateKey points to secret containing the private key, provided by AWS.
+ PrivateKey corev1.SecretKeySelector `json:"privateKey"`
+ // keypairID is key pair ID provided by AWS.
+ KeypairID string `json:"keypairID"`
+ // duration is the duration of the Cloudfront session.
+ // +optional
+ // +kubebuilder:validation:Format=duration
+ Duration metav1.Duration `json:"duration,omitempty"`
+}
+
+// ImageRegistryConfigStorageEmptyDir is a placeholder to be used when
+// the registry is leveraging ephemeral storage.
+type ImageRegistryConfigStorageEmptyDir struct {
+}
+
+// S3TrustedCASource references a config map with a CA certificate bundle in
+// the "openshift-config" namespace. The key for the bundle in the
+// config map is "ca-bundle.crt".
+type S3TrustedCASource struct {
+ // name is the metadata.name of the referenced config map.
+ // This field must adhere to standard config map naming restrictions.
+ // The name must consist solely of alphanumeric characters, hyphens (-)
+ // and periods (.). It has a maximum length of 253 characters.
+ // If this field is not specified or is empty string, the default trust
+ // bundle will be used.
+ // +kubebuilder:validation:MaxLength=253
+ // +kubebuilder:validation:Pattern=`^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`
+ // +optional
+ Name string `json:"name"`
+}
+
+// ImageRegistryConfigStorageS3 holds the information to configure
+// the registry to use the AWS S3 service for backend storage
+// https://docs.docker.com/registry/storage-drivers/s3/
+type ImageRegistryConfigStorageS3 struct {
+ // bucket is the bucket name in which you want to store the registry's
+ // data.
+ // Optional, will be generated if not provided.
+ // +optional
+ Bucket string `json:"bucket,omitempty"`
+ // region is the AWS region in which your bucket exists.
+ // Optional, will be set based on the installed AWS Region.
+ // +optional
+ Region string `json:"region,omitempty"`
+ // regionEndpoint is the endpoint for S3 compatible storage services.
+ // It should be a valid URL with scheme, e.g. https://s3.example.com.
+ // Optional, defaults based on the Region that is provided.
+ // +optional
+ RegionEndpoint string `json:"regionEndpoint,omitempty"`
+ // encrypt specifies whether the registry stores the image in encrypted
+ // format or not.
+ // Optional, defaults to false.
+ // +optional
+ Encrypt bool `json:"encrypt,omitempty"`
+ // keyID is the KMS key ID to use for encryption.
+ // Optional, Encrypt must be true, or this parameter is ignored.
+ // +optional
+ KeyID string `json:"keyID,omitempty"`
+ // cloudFront configures Amazon Cloudfront as the storage middleware in a
+ // registry.
+ // +optional
+ CloudFront *ImageRegistryConfigStorageS3CloudFront `json:"cloudFront,omitempty"`
+ // virtualHostedStyle enables using S3 virtual hosted style bucket paths with
+ // a custom RegionEndpoint
+ // Optional, defaults to false.
+ // +optional
+ VirtualHostedStyle bool `json:"virtualHostedStyle"`
+ // trustedCA is a reference to a config map containing a CA bundle. The
+ // image registry and its operator use certificates from this bundle to
+ // verify S3 server certificates.
+ //
+ // The namespace for the config map referenced by trustedCA is
+ // "openshift-config". The key for the bundle in the config map is
+ // "ca-bundle.crt".
+ // +optional
+ TrustedCA S3TrustedCASource `json:"trustedCA"`
+}
+
+// ImageRegistryConfigStorageGCS holds GCS configuration.
+type ImageRegistryConfigStorageGCS struct {
+ // bucket is the bucket name in which you want to store the registry's
+ // data.
+ // Optional, will be generated if not provided.
+ // +optional
+ Bucket string `json:"bucket,omitempty"`
+ // region is the GCS location in which your bucket exists.
+ // Optional, will be set based on the installed GCS Region.
+ // +optional
+ Region string `json:"region,omitempty"`
+ // projectID is the Project ID of the GCP project that this bucket should
+ // be associated with.
+ // +optional
+ ProjectID string `json:"projectID,omitempty"`
+ // keyID is the KMS key ID to use for encryption.
+ // Optional, buckets are encrypted by default on GCP.
+ // This allows for the use of a custom encryption key.
+ // +optional
+ KeyID string `json:"keyID,omitempty"`
+}
+
+// ImageRegistryConfigStorageSwift holds the information to configure
+// the registry to use the OpenStack Swift service for backend storage
+// https://docs.docker.com/registry/storage-drivers/swift/
+type ImageRegistryConfigStorageSwift struct {
+ // authURL defines the URL for obtaining an authentication token.
+ // +optional
+ AuthURL string `json:"authURL,omitempty"`
+ // authVersion specifies the OpenStack Auth's version.
+ // +optional
+ AuthVersion string `json:"authVersion,omitempty"`
+ // container defines the name of Swift container where to store the
+ // registry's data.
+ // +optional
+ Container string `json:"container,omitempty"`
+ // domain specifies Openstack's domain name for Identity v3 API.
+ // +optional
+ Domain string `json:"domain,omitempty"`
+ // domainID specifies Openstack's domain id for Identity v3 API.
+ // +optional
+ DomainID string `json:"domainID,omitempty"`
+ // tenant defines Openstack tenant name to be used by registry.
+ // +optional
+ Tenant string `json:"tenant,omitempty"`
+ // tenantID defines Openstack tenant id to be used by registry.
+ // +optional
+ TenantID string `json:"tenantID,omitempty"`
+ // regionName defines Openstack's region in which container exists.
+ // +optional
+ RegionName string `json:"regionName,omitempty"`
+}
+
+// ImageRegistryConfigStoragePVC holds Persistent Volume Claims data to
+// be used by the registry.
+type ImageRegistryConfigStoragePVC struct {
+ // claim defines the Persistent Volume Claim's name to be used.
+ // +optional
+ Claim string `json:"claim,omitempty"`
+}
+
+// ImageRegistryConfigStorageAzure holds the information to configure
+// the registry to use Azure Blob Storage for backend storage.
+type ImageRegistryConfigStorageAzure struct {
+ // accountName defines the account to be used by the registry.
+ // +optional
+ AccountName string `json:"accountName,omitempty"`
+ // container defines Azure's container to be used by registry.
+ // +optional
+ // +kubebuilder:validation:MaxLength=63
+ // +kubebuilder:validation:MinLength=3
+ // +kubebuilder:validation:Pattern=`^[0-9a-z]+(-[0-9a-z]+)*$`
+ Container string `json:"container,omitempty"`
+ // cloudName is the name of the Azure cloud environment to be used by the
+ // registry. If empty, the operator will set it based on the infrastructure
+ // object.
+ // +optional
+ CloudName string `json:"cloudName,omitempty"`
+}
+
+// ImageRegistryConfigStorageIBMCOS holds the information to configure
+// the registry to use IBM Cloud Object Storage for backend storage.
+type ImageRegistryConfigStorageIBMCOS struct {
+ // bucket is the bucket name in which you want to store the registry's
+ // data.
+ // Optional, will be generated if not provided.
+ // +optional
+ Bucket string `json:"bucket,omitempty"`
+ // location is the IBM Cloud location in which your bucket exists.
+ // Optional, will be set based on the installed IBM Cloud location.
+ // +optional
+ Location string `json:"location,omitempty"`
+ // resourceGroupName is the name of the IBM Cloud resource group that this
+ // bucket and its service instance is associated with.
+ // Optional, will be set based on the installed IBM Cloud resource group.
+ // +optional
+ ResourceGroupName string `json:"resourceGroupName,omitempty"`
+ // resourceKeyCRN is the CRN of the IBM Cloud resource key that is created
+ // for the service instance. Commonly referred to as a service credential and
+ // must contain HMAC type credentials.
+ // Optional, will be computed if not provided.
+ // +optional
+ // +kubebuilder:validation:Pattern=`^crn:.+:.+:.+:cloud-object-storage:.+:.+:.+:resource-key:.+$`
+ ResourceKeyCRN string `json:"resourceKeyCRN,omitempty"`
+ // serviceInstanceCRN is the CRN of the IBM Cloud Object Storage service
+ // instance that this bucket is associated with.
+ // Optional, will be computed if not provided.
+ // +optional
+ // +kubebuilder:validation:Pattern=`^crn:.+:.+:.+:cloud-object-storage:.+:.+:.+::$`
+ ServiceInstanceCRN string `json:"serviceInstanceCRN,omitempty"`
+}
+
+// EndpointAccessibility defines the Alibaba VPC endpoint for storage
+type EndpointAccessibility string
+
+// AlibabaEncryptionMethod defines an enumerable type for the encryption mode
+type AlibabaEncryptionMethod string
+
+const (
+ // InternalEndpoint sets the VPC endpoint to internal
+ InternalEndpoint EndpointAccessibility = "Internal"
+ // PublicEndpoint sets the VPC endpoint to public
+ PublicEndpoint EndpointAccessibility = "Public"
+
+ // AES256 is an AlibabaEncryptionMethod. This means AES256 encryption
+ AES256 AlibabaEncryptionMethod = "AES256"
+ // KMS is an AlibabaEncryptionMethod. This means KMS encryption
+ KMS AlibabaEncryptionMethod = "KMS"
+)
+
+// EncryptionAlibaba is a union type in kube parlance. Depending on the value for the AlibabaEncryptionMethod,
+// different pointers may be used
+type EncryptionAlibaba struct {
+ // Method defines the different encryption modes available
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+ // Currently the default is `AES256`.
+ // +kubebuilder:validation:Enum="KMS";"AES256"
+ // +kubebuilder:default="AES256"
+ // +optional
+ Method AlibabaEncryptionMethod `json:"method"`
+
+ // KMS (key management service) is an encryption type that holds the struct for KMS KeyID
+ // +optional
+ KMS *KMSEncryptionAlibaba `json:"kms,omitempty"`
+}
+
+type KMSEncryptionAlibaba struct {
+ // KeyID holds the KMS encryption key ID
+ // +kubebuilder:validation:Required
+ // +kubebuilder:validation:MinLength=1
+ KeyID string `json:"keyID"`
+}
+
+// ImageRegistryConfigStorageAlibabaOSS holds Alibaba Cloud OSS configuration.
+// Configures the registry to use Alibaba Cloud Object Storage Service for backend storage.
+// More about oss, you can look at the [official documentation](https://www.alibabacloud.com/help/product/31815.htm)
+type ImageRegistryConfigStorageAlibabaOSS struct {
+ // Bucket is the bucket name in which you want to store the registry's data.
+ // About Bucket naming, more details you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/257087.htm)
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+ // Currently the default will be autogenerated in the form of <clusterid>-image-registry-<region>-<random string 27 chars>
+ // +kubebuilder:validation:MaxLength=63
+ // +kubebuilder:validation:MinLength=3
+ // +kubebuilder:validation:Pattern=`^[0-9a-z]+(-[0-9a-z]+)*$`
+ // +optional
+ Bucket string `json:"bucket,omitempty"`
+ // Region is the Alibaba Cloud Region in which your bucket exists.
+ // For a list of regions, you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/31837.html).
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+ // Currently the default will be based on the installed Alibaba Cloud Region.
+ // +optional
+ Region string `json:"region,omitempty"`
+ // EndpointAccessibility specifies whether the registry uses the OSS VPC internal endpoint
+ // Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+ // Currently the default is `Internal`.
+ // +kubebuilder:validation:Enum="Internal";"Public";""
+ // +kubebuilder:default="Internal"
+ // +optional
+ EndpointAccessibility EndpointAccessibility `json:"endpointAccessibility,omitempty"`
+ // Encryption specifies whether you would like your data encrypted on the server side.
+ // More details, you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/117914.htm)
+ // +optional
+ Encryption *EncryptionAlibaba `json:"encryption,omitempty"`
+}
+
+// ImageRegistryConfigStorage describes how the storage should be configured
+// for the image registry.
+type ImageRegistryConfigStorage struct {
+ // emptyDir represents ephemeral storage on the pod's host node.
+ // WARNING: this storage cannot be used with more than 1 replica and
+ // is not suitable for production use. When the pod is removed from a
+ // node for any reason, the data in the emptyDir is deleted forever.
+ // +optional + EmptyDir *ImageRegistryConfigStorageEmptyDir `json:"emptyDir,omitempty"` + // s3 represents configuration that uses Amazon Simple Storage Service. + // +optional + S3 *ImageRegistryConfigStorageS3 `json:"s3,omitempty"` + // gcs represents configuration that uses Google Cloud Storage. + // +optional + GCS *ImageRegistryConfigStorageGCS `json:"gcs,omitempty"` + // swift represents configuration that uses OpenStack Object Storage. + // +optional + Swift *ImageRegistryConfigStorageSwift `json:"swift,omitempty"` + // pvc represents configuration that uses a PersistentVolumeClaim. + // +optional + PVC *ImageRegistryConfigStoragePVC `json:"pvc,omitempty"` + // azure represents configuration that uses Azure Blob Storage. + // +optional + Azure *ImageRegistryConfigStorageAzure `json:"azure,omitempty"` + // ibmcos represents configuration that uses IBM Cloud Object Storage. + // +optional + IBMCOS *ImageRegistryConfigStorageIBMCOS `json:"ibmcos,omitempty"` + // Oss represents configuration that uses Alibaba Cloud Object Storage Service. + // +optional + OSS *ImageRegistryConfigStorageAlibabaOSS `json:"oss,omitempty"` + // managementState indicates if the operator manages the underlying + // storage unit. If Managed the operator will remove the storage when + // this operator gets Removed. + // +optional + // +kubebuilder:validation:Pattern=`^(Managed|Unmanaged)$` + ManagementState string `json:"managementState,omitempty"` +} + +// ImageRegistryConfigRequests defines registry limits on requests read and write. +type ImageRegistryConfigRequests struct { + // read defines limits for image registry's reads. + // +optional + Read ImageRegistryConfigRequestsLimits `json:"read,omitempty"` + // write defines limits for image registry's writes. + // +optional + Write ImageRegistryConfigRequestsLimits `json:"write,omitempty"` +} + +// ImageRegistryConfigRequestsLimits holds configuration on the max, enqueued +// and waiting registry's API requests. +type ImageRegistryConfigRequestsLimits struct { + // maxRunning sets the maximum in flight api requests to the registry. + // +optional + MaxRunning int `json:"maxRunning,omitempty"` + // maxInQueue sets the maximum queued api requests to the registry. + // +optional + MaxInQueue int `json:"maxInQueue,omitempty"` + // maxWaitInQueue sets the maximum time a request can wait in the queue + // before being rejected. + // +optional + // +kubebuilder:validation:Format=duration + MaxWaitInQueue metav1.Duration `json:"maxWaitInQueue,omitempty"` +} + +// ImageRegistryConfigRoute holds information on external route access to image +// registry. +type ImageRegistryConfigRoute struct { + // name of the route to be created. + Name string `json:"name"` + // hostname for the route. + // +optional + Hostname string `json:"hostname,omitempty"` + // secretName points to secret containing the certificates to be used + // by the route. 
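+ //
+ // An illustrative sketch, not part of the upstream file: a fully specified
+ // route might look like the following (the names and hostname are made up):
+ //
+ //	route := ImageRegistryConfigRoute{
+ //		Name:       "public-registry",
+ //		Hostname:   "registry.example.com",
+ //		SecretName: "public-registry-tls",
+ //	}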
+ // +optional + SecretName string `json:"secretName,omitempty"` +} diff --git a/vendor/github.com/openshift/api/imageregistry/v1/types_imagepruner.go b/vendor/github.com/openshift/api/imageregistry/v1/types_imagepruner.go new file mode 100644 index 000000000..3c4b6c51c --- /dev/null +++ b/vendor/github.com/openshift/api/imageregistry/v1/types_imagepruner.go @@ -0,0 +1,112 @@ +package v1 + +import ( + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + operatorv1 "github.com/openshift/api/operator/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImagePrunerList is a slice of ImagePruner objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImagePrunerList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + Items []ImagePruner `json:"items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImagePruner is the configuration object for an image registry pruner +// managed by the registry operator. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ImagePruner struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + Spec ImagePrunerSpec `json:"spec"` + // +optional + Status ImagePrunerStatus `json:"status"` +} + +// ImagePrunerSpec defines the specs for the running image pruner. +type ImagePrunerSpec struct { + // schedule specifies when to execute the job using standard cronjob syntax: https://wikipedia.org/wiki/Cron. + // Defaults to `0 0 * * *`. + // +optional + Schedule string `json:"schedule"` + // suspend specifies whether or not to suspend subsequent executions of this cronjob. + // Defaults to false. + // +optional + Suspend *bool `json:"suspend,omitempty"` + // keepTagRevisions specifies the number of image revisions for a tag in an image stream that will be preserved. + // Defaults to 3. + // +optional + KeepTagRevisions *int `json:"keepTagRevisions,omitempty"` + // keepYoungerThan specifies the minimum age in nanoseconds of an image and its referrers for it to be considered a candidate for pruning. + // DEPRECATED: This field is deprecated in favor of keepYoungerThanDuration. If both are set, this field is ignored and keepYoungerThanDuration takes precedence. + // +optional + KeepYoungerThan *time.Duration `json:"keepYoungerThan,omitempty"` + // keepYoungerThanDuration specifies the minimum age of an image and its referrers for it to be considered a candidate for pruning. + // Defaults to 60m (60 minutes). + // +optional + // +kubebuilder:validation:Format=duration + KeepYoungerThanDuration *metav1.Duration `json:"keepYoungerThanDuration,omitempty"` + // resources defines the resource requests and limits for the image pruner pod. + // +optional + Resources *corev1.ResourceRequirements `json:"resources,omitempty"` + // affinity is a group of node affinity scheduling rules for the image pruner pod. 
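+ //
+ // An illustrative sketch, not part of the upstream file: a minimal spec that
+ // keeps three tag revisions on the default daily schedule, leaving every
+ // other field to its documented default:
+ //
+ //	keep := 3
+ //	spec := ImagePrunerSpec{
+ //		Schedule:         "0 0 * * *",
+ //		KeepTagRevisions: &keep,
+ //	}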
+ // +optional + Affinity *corev1.Affinity `json:"affinity,omitempty"` + // nodeSelector defines the node selection constraints for the image pruner pod. + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + // tolerations defines the node tolerations for the image pruner pod. + // +optional + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` + // successfulJobsHistoryLimit specifies how many successful image pruner jobs to retain. + // Defaults to 3 if not set. + // +optional + SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty"` + // failedJobsHistoryLimit specifies how many failed image pruner jobs to retain. + // Defaults to 3 if not set. + // +optional + FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty"` + // ignoreInvalidImageReferences indicates whether the pruner can ignore + // errors while parsing image references. + // +optional + IgnoreInvalidImageReferences bool `json:"ignoreInvalidImageReferences,omitempty"` + // logLevel sets the level of log output for the pruner job. + // + // Valid values are: "Normal", "Debug", "Trace", "TraceAll". + // Defaults to "Normal". + // +optional + // +kubebuilder:default=Normal + LogLevel operatorv1.LogLevel `json:"logLevel,omitempty"` +} + +// ImagePrunerStatus reports image pruner operational status. +type ImagePrunerStatus struct { + // observedGeneration is the last generation change that has been applied. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + // conditions is a list of conditions and their status. + // +optional + Conditions []operatorv1.OperatorCondition `json:"conditions,omitempty"` +} diff --git a/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..7519720a1 --- /dev/null +++ b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.deepcopy.go @@ -0,0 +1,637 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + time "time" + + operatorv1 "github.com/openshift/api/operator/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Config) DeepCopyInto(out *Config) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Config) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
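+//
+// As an illustrative sketch, not part of the generated file: the DeepCopy
+// helpers below are what make it safe to mutate objects obtained from a
+// shared informer cache, e.g.
+//
+//	copied := original.DeepCopy() // original is a *ConfigList
+//	copied.Items = nil            // the cached object is left untouched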
+func (in *ConfigList) DeepCopyInto(out *ConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Config, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigList. +func (in *ConfigList) DeepCopy() *ConfigList { + if in == nil { + return nil + } + out := new(ConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EncryptionAlibaba) DeepCopyInto(out *EncryptionAlibaba) { + *out = *in + if in.KMS != nil { + in, out := &in.KMS, &out.KMS + *out = new(KMSEncryptionAlibaba) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EncryptionAlibaba. +func (in *EncryptionAlibaba) DeepCopy() *EncryptionAlibaba { + if in == nil { + return nil + } + out := new(EncryptionAlibaba) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePruner) DeepCopyInto(out *ImagePruner) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePruner. +func (in *ImagePruner) DeepCopy() *ImagePruner { + if in == nil { + return nil + } + out := new(ImagePruner) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImagePruner) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePrunerList) DeepCopyInto(out *ImagePrunerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ImagePruner, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePrunerList. +func (in *ImagePrunerList) DeepCopy() *ImagePrunerList { + if in == nil { + return nil + } + out := new(ImagePrunerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImagePrunerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImagePrunerSpec) DeepCopyInto(out *ImagePrunerSpec) { + *out = *in + if in.Suspend != nil { + in, out := &in.Suspend, &out.Suspend + *out = new(bool) + **out = **in + } + if in.KeepTagRevisions != nil { + in, out := &in.KeepTagRevisions, &out.KeepTagRevisions + *out = new(int) + **out = **in + } + if in.KeepYoungerThan != nil { + in, out := &in.KeepYoungerThan, &out.KeepYoungerThan + *out = new(time.Duration) + **out = **in + } + if in.KeepYoungerThanDuration != nil { + in, out := &in.KeepYoungerThanDuration, &out.KeepYoungerThanDuration + *out = new(metav1.Duration) + **out = **in + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(corev1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(corev1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SuccessfulJobsHistoryLimit != nil { + in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit + *out = new(int32) + **out = **in + } + if in.FailedJobsHistoryLimit != nil { + in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePrunerSpec. +func (in *ImagePrunerSpec) DeepCopy() *ImagePrunerSpec { + if in == nil { + return nil + } + out := new(ImagePrunerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePrunerStatus) DeepCopyInto(out *ImagePrunerStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]operatorv1.OperatorCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePrunerStatus. +func (in *ImagePrunerStatus) DeepCopy() *ImagePrunerStatus { + if in == nil { + return nil + } + out := new(ImagePrunerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRegistryConfigProxy) DeepCopyInto(out *ImageRegistryConfigProxy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigProxy. +func (in *ImageRegistryConfigProxy) DeepCopy() *ImageRegistryConfigProxy { + if in == nil { + return nil + } + out := new(ImageRegistryConfigProxy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRegistryConfigRequests) DeepCopyInto(out *ImageRegistryConfigRequests) { + *out = *in + out.Read = in.Read + out.Write = in.Write + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigRequests. 
+func (in *ImageRegistryConfigRequests) DeepCopy() *ImageRegistryConfigRequests { + if in == nil { + return nil + } + out := new(ImageRegistryConfigRequests) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRegistryConfigRequestsLimits) DeepCopyInto(out *ImageRegistryConfigRequestsLimits) { + *out = *in + out.MaxWaitInQueue = in.MaxWaitInQueue + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigRequestsLimits. +func (in *ImageRegistryConfigRequestsLimits) DeepCopy() *ImageRegistryConfigRequestsLimits { + if in == nil { + return nil + } + out := new(ImageRegistryConfigRequestsLimits) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRegistryConfigRoute) DeepCopyInto(out *ImageRegistryConfigRoute) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigRoute. +func (in *ImageRegistryConfigRoute) DeepCopy() *ImageRegistryConfigRoute { + if in == nil { + return nil + } + out := new(ImageRegistryConfigRoute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRegistryConfigStorage) DeepCopyInto(out *ImageRegistryConfigStorage) { + *out = *in + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(ImageRegistryConfigStorageEmptyDir) + **out = **in + } + if in.S3 != nil { + in, out := &in.S3, &out.S3 + *out = new(ImageRegistryConfigStorageS3) + (*in).DeepCopyInto(*out) + } + if in.GCS != nil { + in, out := &in.GCS, &out.GCS + *out = new(ImageRegistryConfigStorageGCS) + **out = **in + } + if in.Swift != nil { + in, out := &in.Swift, &out.Swift + *out = new(ImageRegistryConfigStorageSwift) + **out = **in + } + if in.PVC != nil { + in, out := &in.PVC, &out.PVC + *out = new(ImageRegistryConfigStoragePVC) + **out = **in + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(ImageRegistryConfigStorageAzure) + **out = **in + } + if in.IBMCOS != nil { + in, out := &in.IBMCOS, &out.IBMCOS + *out = new(ImageRegistryConfigStorageIBMCOS) + **out = **in + } + if in.OSS != nil { + in, out := &in.OSS, &out.OSS + *out = new(ImageRegistryConfigStorageAlibabaOSS) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigStorage. +func (in *ImageRegistryConfigStorage) DeepCopy() *ImageRegistryConfigStorage { + if in == nil { + return nil + } + out := new(ImageRegistryConfigStorage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRegistryConfigStorageAlibabaOSS) DeepCopyInto(out *ImageRegistryConfigStorageAlibabaOSS) { + *out = *in + if in.Encryption != nil { + in, out := &in.Encryption, &out.Encryption + *out = new(EncryptionAlibaba) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigStorageAlibabaOSS. 
+func (in *ImageRegistryConfigStorageAlibabaOSS) DeepCopy() *ImageRegistryConfigStorageAlibabaOSS { + if in == nil { + return nil + } + out := new(ImageRegistryConfigStorageAlibabaOSS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRegistryConfigStorageAzure) DeepCopyInto(out *ImageRegistryConfigStorageAzure) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigStorageAzure. +func (in *ImageRegistryConfigStorageAzure) DeepCopy() *ImageRegistryConfigStorageAzure { + if in == nil { + return nil + } + out := new(ImageRegistryConfigStorageAzure) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRegistryConfigStorageEmptyDir) DeepCopyInto(out *ImageRegistryConfigStorageEmptyDir) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigStorageEmptyDir. +func (in *ImageRegistryConfigStorageEmptyDir) DeepCopy() *ImageRegistryConfigStorageEmptyDir { + if in == nil { + return nil + } + out := new(ImageRegistryConfigStorageEmptyDir) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRegistryConfigStorageGCS) DeepCopyInto(out *ImageRegistryConfigStorageGCS) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigStorageGCS. +func (in *ImageRegistryConfigStorageGCS) DeepCopy() *ImageRegistryConfigStorageGCS { + if in == nil { + return nil + } + out := new(ImageRegistryConfigStorageGCS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRegistryConfigStorageIBMCOS) DeepCopyInto(out *ImageRegistryConfigStorageIBMCOS) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigStorageIBMCOS. +func (in *ImageRegistryConfigStorageIBMCOS) DeepCopy() *ImageRegistryConfigStorageIBMCOS { + if in == nil { + return nil + } + out := new(ImageRegistryConfigStorageIBMCOS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRegistryConfigStoragePVC) DeepCopyInto(out *ImageRegistryConfigStoragePVC) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigStoragePVC. +func (in *ImageRegistryConfigStoragePVC) DeepCopy() *ImageRegistryConfigStoragePVC { + if in == nil { + return nil + } + out := new(ImageRegistryConfigStoragePVC) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ImageRegistryConfigStorageS3) DeepCopyInto(out *ImageRegistryConfigStorageS3) { + *out = *in + if in.CloudFront != nil { + in, out := &in.CloudFront, &out.CloudFront + *out = new(ImageRegistryConfigStorageS3CloudFront) + (*in).DeepCopyInto(*out) + } + out.TrustedCA = in.TrustedCA + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigStorageS3. +func (in *ImageRegistryConfigStorageS3) DeepCopy() *ImageRegistryConfigStorageS3 { + if in == nil { + return nil + } + out := new(ImageRegistryConfigStorageS3) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRegistryConfigStorageS3CloudFront) DeepCopyInto(out *ImageRegistryConfigStorageS3CloudFront) { + *out = *in + in.PrivateKey.DeepCopyInto(&out.PrivateKey) + out.Duration = in.Duration + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigStorageS3CloudFront. +func (in *ImageRegistryConfigStorageS3CloudFront) DeepCopy() *ImageRegistryConfigStorageS3CloudFront { + if in == nil { + return nil + } + out := new(ImageRegistryConfigStorageS3CloudFront) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRegistryConfigStorageSwift) DeepCopyInto(out *ImageRegistryConfigStorageSwift) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryConfigStorageSwift. +func (in *ImageRegistryConfigStorageSwift) DeepCopy() *ImageRegistryConfigStorageSwift { + if in == nil { + return nil + } + out := new(ImageRegistryConfigStorageSwift) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRegistrySpec) DeepCopyInto(out *ImageRegistrySpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + out.Proxy = in.Proxy + in.Storage.DeepCopyInto(&out.Storage) + out.Requests = in.Requests + if in.Routes != nil { + in, out := &in.Routes, &out.Routes + *out = make([]ImageRegistryConfigRoute, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = new(corev1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(corev1.Affinity) + (*in).DeepCopyInto(*out) + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]corev1.TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistrySpec. 
+func (in *ImageRegistrySpec) DeepCopy() *ImageRegistrySpec { + if in == nil { + return nil + } + out := new(ImageRegistrySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageRegistryStatus) DeepCopyInto(out *ImageRegistryStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + in.Storage.DeepCopyInto(&out.Storage) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageRegistryStatus. +func (in *ImageRegistryStatus) DeepCopy() *ImageRegistryStatus { + if in == nil { + return nil + } + out := new(ImageRegistryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KMSEncryptionAlibaba) DeepCopyInto(out *KMSEncryptionAlibaba) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KMSEncryptionAlibaba. +func (in *KMSEncryptionAlibaba) DeepCopy() *KMSEncryptionAlibaba { + if in == nil { + return nil + } + out := new(KMSEncryptionAlibaba) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3TrustedCASource) DeepCopyInto(out *S3TrustedCASource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3TrustedCASource. +func (in *S3TrustedCASource) DeepCopy() *S3TrustedCASource { + if in == nil { + return nil + } + out := new(S3TrustedCASource) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..391381ea0 --- /dev/null +++ b/vendor/github.com/openshift/api/imageregistry/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,311 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Config = map[string]string{ + "": "Config is the configuration object for a registry instance managed by the registry operator\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (Config) SwaggerDoc() map[string]string { + return map_Config +} + +var map_ConfigList = map[string]string{ + "": "ConfigList is a slice of Config objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ConfigList) SwaggerDoc() map[string]string { + return map_ConfigList +} + +var map_EncryptionAlibaba = map[string]string{ + "": "EncryptionAlibaba this a union type in kube parlance. Depending on the value for the AlibabaEncryptionMethod, different pointers may be used", + "method": "Method defines the different encrytion modes available Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `AES256`.", + "kms": "KMS (key management service) is an encryption type that holds the struct for KMS KeyID", +} + +func (EncryptionAlibaba) SwaggerDoc() map[string]string { + return map_EncryptionAlibaba +} + +var map_ImageRegistryConfigProxy = map[string]string{ + "": "ImageRegistryConfigProxy defines proxy configuration to be used by registry.", + "http": "http defines the proxy to be used by the image registry when accessing HTTP endpoints.", + "https": "https defines the proxy to be used by the image registry when accessing HTTPS endpoints.", + "noProxy": "noProxy defines a comma-separated list of host names that shouldn't go through any proxy.", +} + +func (ImageRegistryConfigProxy) SwaggerDoc() map[string]string { + return map_ImageRegistryConfigProxy +} + +var map_ImageRegistryConfigRequests = map[string]string{ + "": "ImageRegistryConfigRequests defines registry limits on requests read and write.", + "read": "read defines limits for image registry's reads.", + "write": "write defines limits for image registry's writes.", +} + +func (ImageRegistryConfigRequests) SwaggerDoc() map[string]string { + return map_ImageRegistryConfigRequests +} + +var map_ImageRegistryConfigRequestsLimits = map[string]string{ + "": "ImageRegistryConfigRequestsLimits holds configuration on the max, enqueued and waiting registry's API requests.", + "maxRunning": "maxRunning sets the maximum in flight api requests to the registry.", + "maxInQueue": "maxInQueue sets the maximum queued api requests to the registry.", + "maxWaitInQueue": "maxWaitInQueue sets the maximum time a request can wait in the queue before being rejected.", +} + +func (ImageRegistryConfigRequestsLimits) SwaggerDoc() map[string]string { + return map_ImageRegistryConfigRequestsLimits +} + +var map_ImageRegistryConfigRoute = map[string]string{ + "": "ImageRegistryConfigRoute holds information on external route access to image registry.", + "name": "name of the route to be created.", + "hostname": "hostname for the route.", + "secretName": "secretName points to secret containing the certificates to be used by the route.", +} + +func (ImageRegistryConfigRoute) SwaggerDoc() map[string]string { + return map_ImageRegistryConfigRoute +} + +var map_ImageRegistryConfigStorage = map[string]string{ + "": "ImageRegistryConfigStorage describes how the storage should be configured for the image registry.", + "emptyDir": "emptyDir represents ephemeral storage on the pod's host node. WARNING: this storage cannot be used with more than 1 replica and is not suitable for production use. 
When the pod is removed from a node for any reason, the data in the emptyDir is deleted forever.", + "s3": "s3 represents configuration that uses Amazon Simple Storage Service.", + "gcs": "gcs represents configuration that uses Google Cloud Storage.", + "swift": "swift represents configuration that uses OpenStack Object Storage.", + "pvc": "pvc represents configuration that uses a PersistentVolumeClaim.", + "azure": "azure represents configuration that uses Azure Blob Storage.", + "ibmcos": "ibmcos represents configuration that uses IBM Cloud Object Storage.", + "oss": "Oss represents configuration that uses Alibaba Cloud Object Storage Service.", + "managementState": "managementState indicates if the operator manages the underlying storage unit. If Managed the operator will remove the storage when this operator gets Removed.", +} + +func (ImageRegistryConfigStorage) SwaggerDoc() map[string]string { + return map_ImageRegistryConfigStorage +} + +var map_ImageRegistryConfigStorageAlibabaOSS = map[string]string{ + "": "ImageRegistryConfigStorageAlibabaOSS holds Alibaba Cloud OSS configuration. Configures the registry to use Alibaba Cloud Object Storage Service for backend storage. More about oss, you can look at the [official documentation](https://www.alibabacloud.com/help/product/31815.htm)", + "bucket": "Bucket is the bucket name in which you want to store the registry's data. About Bucket naming, more details you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/257087.htm) Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default will be autogenerated in the form of -image-registry--", + "region": "Region is the Alibaba Cloud Region in which your bucket exists. For a list of regions, you can look at the [official documentation](https://www.alibabacloud.com/help/doc-detail/31837.html). Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default will be based on the installed Alibaba Cloud Region.", + "endpointAccessibility": "EndpointAccessibility specifies whether the registry use the OSS VPC internal endpoint Empty value means no opinion and the platform chooses the a default, which is subject to change over time. Currently the default is `Internal`.", + "encryption": "Encryption specifies whether you would like your data encrypted on the server side. More details, you can look cat the [official documentation](https://www.alibabacloud.com/help/doc-detail/117914.htm)", +} + +func (ImageRegistryConfigStorageAlibabaOSS) SwaggerDoc() map[string]string { + return map_ImageRegistryConfigStorageAlibabaOSS +} + +var map_ImageRegistryConfigStorageAzure = map[string]string{ + "": "ImageRegistryConfigStorageAzure holds the information to configure the registry to use Azure Blob Storage for backend storage.", + "accountName": "accountName defines the account to be used by the registry.", + "container": "container defines Azure's container to be used by registry.", + "cloudName": "cloudName is the name of the Azure cloud environment to be used by the registry. 
+}
+
+func (ImageRegistryConfigStorageAzure) SwaggerDoc() map[string]string {
+	return map_ImageRegistryConfigStorageAzure
+}
+
+var map_ImageRegistryConfigStorageEmptyDir = map[string]string{
+	"": "ImageRegistryConfigStorageEmptyDir is a placeholder to be used when the registry is leveraging ephemeral storage.",
+}
+
+func (ImageRegistryConfigStorageEmptyDir) SwaggerDoc() map[string]string {
+	return map_ImageRegistryConfigStorageEmptyDir
+}
+
+var map_ImageRegistryConfigStorageGCS = map[string]string{
+	"":          "ImageRegistryConfigStorageGCS holds GCS configuration.",
+	"bucket":    "bucket is the bucket name in which you want to store the registry's data. Optional, will be generated if not provided.",
+	"region":    "region is the GCS location in which your bucket exists. Optional, will be set based on the installed GCS Region.",
+	"projectID": "projectID is the Project ID of the GCP project that this bucket should be associated with.",
+	"keyID":     "keyID is the KMS key ID to use for encryption. Optional, buckets are encrypted by default on GCP. This allows for the use of a custom encryption key.",
+}
+
+func (ImageRegistryConfigStorageGCS) SwaggerDoc() map[string]string {
+	return map_ImageRegistryConfigStorageGCS
+}
+
+var map_ImageRegistryConfigStorageIBMCOS = map[string]string{
+	"":                   "ImageRegistryConfigStorageIBMCOS holds the information to configure the registry to use IBM Cloud Object Storage for backend storage.",
+	"bucket":             "bucket is the bucket name in which you want to store the registry's data. Optional, will be generated if not provided.",
+	"location":           "location is the IBM Cloud location in which your bucket exists. Optional, will be set based on the installed IBM Cloud location.",
+	"resourceGroupName":  "resourceGroupName is the name of the IBM Cloud resource group that this bucket and its service instance is associated with. Optional, will be set based on the installed IBM Cloud resource group.",
+	"resourceKeyCRN":     "resourceKeyCRN is the CRN of the IBM Cloud resource key that is created for the service instance. Commonly referred as a service credential and must contain HMAC type credentials. Optional, will be computed if not provided.",
+	"serviceInstanceCRN": "serviceInstanceCRN is the CRN of the IBM Cloud Object Storage service instance that this bucket is associated with. Optional, will be computed if not provided.",
+}
+
+func (ImageRegistryConfigStorageIBMCOS) SwaggerDoc() map[string]string {
+	return map_ImageRegistryConfigStorageIBMCOS
+}
+
+var map_ImageRegistryConfigStoragePVC = map[string]string{
+	"":      "ImageRegistryConfigStoragePVC holds Persistent Volume Claims data to be used by the registry.",
+	"claim": "claim defines the Persistent Volume Claim's name to be used.",
+}
+
+func (ImageRegistryConfigStoragePVC) SwaggerDoc() map[string]string {
+	return map_ImageRegistryConfigStoragePVC
+}
+
+var map_ImageRegistryConfigStorageS3 = map[string]string{
+	"":                   "ImageRegistryConfigStorageS3 holds the information to configure the registry to use the AWS S3 service for backend storage https://docs.docker.com/registry/storage-drivers/s3/",
+	"bucket":             "bucket is the bucket name in which you want to store the registry's data. Optional, will be generated if not provided.",
+	"region":             "region is the AWS region in which your bucket exists. Optional, will be set based on the installed AWS Region.",
+	"regionEndpoint":     "regionEndpoint is the endpoint for S3 compatible storage services. It should be a valid URL with scheme, e.g. https://s3.example.com. Optional, defaults based on the Region that is provided.",
+	"encrypt":            "encrypt specifies whether the registry stores the image in encrypted format or not. Optional, defaults to false.",
+	"keyID":              "keyID is the KMS key ID to use for encryption. Optional, Encrypt must be true, or this parameter is ignored.",
+	"cloudFront":         "cloudFront configures Amazon Cloudfront as the storage middleware in a registry.",
+	"virtualHostedStyle": "virtualHostedStyle enables using S3 virtual hosted style bucket paths with a custom RegionEndpoint. Optional, defaults to false.",
+	"trustedCA":          "trustedCA is a reference to a config map containing a CA bundle. The image registry and its operator use certificates from this bundle to verify S3 server certificates.\n\nThe namespace for the config map referenced by trustedCA is \"openshift-config\". The key for the bundle in the config map is \"ca-bundle.crt\".",
+}
+
+func (ImageRegistryConfigStorageS3) SwaggerDoc() map[string]string {
+	return map_ImageRegistryConfigStorageS3
+}
+
+var map_ImageRegistryConfigStorageS3CloudFront = map[string]string{
+	"":           "ImageRegistryConfigStorageS3CloudFront holds the configuration to use Amazon Cloudfront as the storage middleware in a registry. https://docs.docker.com/registry/configuration/#cloudfront",
+	"baseURL":    "baseURL contains the SCHEME://HOST[/PATH] at which Cloudfront is served.",
+	"privateKey": "privateKey points to secret containing the private key, provided by AWS.",
+	"keypairID":  "keypairID is key pair ID provided by AWS.",
+	"duration":   "duration is the duration of the Cloudfront session.",
+}
+
+func (ImageRegistryConfigStorageS3CloudFront) SwaggerDoc() map[string]string {
+	return map_ImageRegistryConfigStorageS3CloudFront
+}
+
+var map_ImageRegistryConfigStorageSwift = map[string]string{
+	"":            "ImageRegistryConfigStorageSwift holds the information to configure the registry to use the OpenStack Swift service for backend storage https://docs.docker.com/registry/storage-drivers/swift/",
+	"authURL":     "authURL defines the URL for obtaining an authentication token.",
+	"authVersion": "authVersion specifies the OpenStack Auth's version.",
+	"container":   "container defines the name of Swift container where to store the registry's data.",
+	"domain":      "domain specifies Openstack's domain name for Identity v3 API.",
+	"domainID":    "domainID specifies Openstack's domain id for Identity v3 API.",
+	"tenant":      "tenant defines Openstack tenant name to be used by registry.",
+	"tenantID":    "tenantID defines Openstack tenant id to be used by registry.",
+	"regionName":  "regionName defines Openstack's region in which container exists.",
+}
+
+func (ImageRegistryConfigStorageSwift) SwaggerDoc() map[string]string {
+	return map_ImageRegistryConfigStorageSwift
+}
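Putting the S3 fields above together, a hedged sketch of a storage block pointing at an S3-compatible endpoint; the bucket, region, and endpoint values are hypothetical, and the field names assume the imageregistry/v1 Go types these generated docs accompany:

package main

import (
	"fmt"

	imageregistryv1 "github.com/openshift/api/imageregistry/v1"
)

func main() {
	// A custom RegionEndpoint plus VirtualHostedStyle targets an
	// S3-compatible store rather than AWS proper; values are illustrative.
	s3 := imageregistryv1.ImageRegistryConfigStorageS3{
		Bucket:             "registry-bucket",
		Region:             "us-east-1",
		RegionEndpoint:     "https://s3.example.com",
		VirtualHostedStyle: true,
	}
	fmt.Printf("%+v\n", s3)
}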
+
+var map_ImageRegistrySpec = map[string]string{
+	"":                          "ImageRegistrySpec defines the specs for the running registry.",
+	"httpSecret":                "httpSecret is the value needed by the registry to secure uploads, generated by default.",
+	"proxy":                     "proxy defines the proxy to be used when calling master api, upstream registries, etc.",
+	"storage":                   "storage details for configuring registry storage, e.g. S3 bucket coordinates.",
+	"readOnly":                  "readOnly indicates whether the registry instance should reject attempts to push new images or delete existing ones.",
+	"disableRedirect":           "disableRedirect controls whether to route all data through the Registry, rather than redirecting to the backend.",
+	"requests":                  "requests controls how many parallel requests a given registry instance will handle before queuing additional requests.",
+	"defaultRoute":              "defaultRoute indicates whether an external facing route for the registry should be created using the default generated hostname.",
+	"routes":                    "routes defines additional external facing routes which should be created for the registry.",
+	"replicas":                  "replicas determines the number of registry instances to run.",
+	"logging":                   "logging is deprecated, use logLevel instead.",
+	"resources":                 "resources defines the resource requests+limits for the registry pod.",
+	"nodeSelector":              "nodeSelector defines the node selection constraints for the registry pod.",
+	"tolerations":               "tolerations defines the tolerations for the registry pod.",
+	"rolloutStrategy":           "rolloutStrategy defines rollout strategy for the image registry deployment.",
+	"affinity":                  "affinity is a group of node affinity scheduling rules for the image registry pod(s).",
+	"topologySpreadConstraints": "topologySpreadConstraints specify how to spread matching pods among the given topology.",
+}
+
+func (ImageRegistrySpec) SwaggerDoc() map[string]string {
+	return map_ImageRegistrySpec
+}
+
+var map_ImageRegistryStatus = map[string]string{
+	"":               "ImageRegistryStatus reports image registry operational status.",
+	"storageManaged": "storageManaged is deprecated, please refer to Storage.managementState",
+	"storage":        "storage indicates the current applied storage configuration of the registry.",
+}
+
+func (ImageRegistryStatus) SwaggerDoc() map[string]string {
+	return map_ImageRegistryStatus
+}
+
+var map_KMSEncryptionAlibaba = map[string]string{
+	"keyID": "KeyID holds the KMS encryption key ID",
+}
+
+func (KMSEncryptionAlibaba) SwaggerDoc() map[string]string {
+	return map_KMSEncryptionAlibaba
+}
+
+var map_S3TrustedCASource = map[string]string{
+	"":     "S3TrustedCASource references a config map with a CA certificate bundle in the \"openshift-config\" namespace. The key for the bundle in the config map is \"ca-bundle.crt\".",
+	"name": "name is the metadata.name of the referenced config map. This field must adhere to standard config map naming restrictions. The name must consist solely of alphanumeric characters, hyphens (-) and periods (.). It has a maximum length of 253 characters. If this field is not specified or is an empty string, the default trust bundle will be used.",
+}
+
+func (S3TrustedCASource) SwaggerDoc() map[string]string {
+	return map_S3TrustedCASource
+}
+
+var map_ImagePruner = map[string]string{
+	"":         "ImagePruner is the configuration object for an image registry pruner managed by the registry operator.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ImagePruner) SwaggerDoc() map[string]string { + return map_ImagePruner +} + +var map_ImagePrunerList = map[string]string{ + "": "ImagePrunerList is a slice of ImagePruner objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ImagePrunerList) SwaggerDoc() map[string]string { + return map_ImagePrunerList +} + +var map_ImagePrunerSpec = map[string]string{ + "": "ImagePrunerSpec defines the specs for the running image pruner.", + "schedule": "schedule specifies when to execute the job using standard cronjob syntax: https://wikipedia.org/wiki/Cron. Defaults to `0 0 * * *`.", + "suspend": "suspend specifies whether or not to suspend subsequent executions of this cronjob. Defaults to false.", + "keepTagRevisions": "keepTagRevisions specifies the number of image revisions for a tag in an image stream that will be preserved. Defaults to 3.", + "keepYoungerThan": "keepYoungerThan specifies the minimum age in nanoseconds of an image and its referrers for it to be considered a candidate for pruning. DEPRECATED: This field is deprecated in favor of keepYoungerThanDuration. If both are set, this field is ignored and keepYoungerThanDuration takes precedence.", + "keepYoungerThanDuration": "keepYoungerThanDuration specifies the minimum age of an image and its referrers for it to be considered a candidate for pruning. Defaults to 60m (60 minutes).", + "resources": "resources defines the resource requests and limits for the image pruner pod.", + "affinity": "affinity is a group of node affinity scheduling rules for the image pruner pod.", + "nodeSelector": "nodeSelector defines the node selection constraints for the image pruner pod.", + "tolerations": "tolerations defines the node tolerations for the image pruner pod.", + "successfulJobsHistoryLimit": "successfulJobsHistoryLimit specifies how many successful image pruner jobs to retain. Defaults to 3 if not set.", + "failedJobsHistoryLimit": "failedJobsHistoryLimit specifies how many failed image pruner jobs to retain. Defaults to 3 if not set.", + "ignoreInvalidImageReferences": "ignoreInvalidImageReferences indicates whether the pruner can ignore errors while parsing image references.", + "logLevel": "logLevel sets the level of log output for the pruner job.\n\nValid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". 
Defaults to \"Normal\".", +} + +func (ImagePrunerSpec) SwaggerDoc() map[string]string { + return map_ImagePrunerSpec +} + +var map_ImagePrunerStatus = map[string]string{ + "": "ImagePrunerStatus reports image pruner operational status.", + "observedGeneration": "observedGeneration is the last generation change that has been applied.", + "conditions": "conditions is a list of conditions and their status.", +} + +func (ImagePrunerStatus) SwaggerDoc() map[string]string { + return map_ImagePrunerStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/install.go b/vendor/github.com/openshift/api/install.go new file mode 100644 index 000000000..d7668b3c0 --- /dev/null +++ b/vendor/github.com/openshift/api/install.go @@ -0,0 +1,169 @@ +package api + +import ( + kadmissionv1 "k8s.io/api/admission/v1" + kadmissionv1beta1 "k8s.io/api/admission/v1beta1" + kadmissionregistrationv1 "k8s.io/api/admissionregistration/v1" + kadmissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + kappsv1 "k8s.io/api/apps/v1" + kappsv1beta1 "k8s.io/api/apps/v1beta1" + kappsv1beta2 "k8s.io/api/apps/v1beta2" + kauthenticationv1 "k8s.io/api/authentication/v1" + kauthenticationv1beta1 "k8s.io/api/authentication/v1beta1" + kauthorizationv1 "k8s.io/api/authorization/v1" + kauthorizationv1beta1 "k8s.io/api/authorization/v1beta1" + kautoscalingv1 "k8s.io/api/autoscaling/v1" + kautoscalingv2 "k8s.io/api/autoscaling/v2" + kautoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" + kautoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" + kbatchv1 "k8s.io/api/batch/v1" + kbatchv1beta1 "k8s.io/api/batch/v1beta1" + kcertificatesv1 "k8s.io/api/certificates/v1" + kcertificatesv1beta1 "k8s.io/api/certificates/v1beta1" + kcoordinationv1 "k8s.io/api/coordination/v1" + kcoordinationv1beta1 "k8s.io/api/coordination/v1beta1" + kcorev1 "k8s.io/api/core/v1" + keventsv1 "k8s.io/api/events/v1" + keventsv1beta1 "k8s.io/api/events/v1beta1" + kextensionsv1beta1 "k8s.io/api/extensions/v1beta1" + kflowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" + kflowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" + kflowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" + kimagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1" + knetworkingv1 "k8s.io/api/networking/v1" + knetworkingv1beta1 "k8s.io/api/networking/v1beta1" + knodev1 "k8s.io/api/node/v1" + knodev1alpha1 "k8s.io/api/node/v1alpha1" + knodev1beta1 "k8s.io/api/node/v1beta1" + kpolicyv1 "k8s.io/api/policy/v1" + kpolicyv1beta1 "k8s.io/api/policy/v1beta1" + krbacv1 "k8s.io/api/rbac/v1" + krbacv1alpha1 "k8s.io/api/rbac/v1alpha1" + krbacv1beta1 "k8s.io/api/rbac/v1beta1" + kschedulingv1 "k8s.io/api/scheduling/v1" + kschedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" + kschedulingv1beta1 "k8s.io/api/scheduling/v1beta1" + kstoragev1 "k8s.io/api/storage/v1" + kstoragev1alpha1 "k8s.io/api/storage/v1alpha1" + kstoragev1beta1 "k8s.io/api/storage/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/openshift/api/apiserver" + "github.com/openshift/api/apps" + "github.com/openshift/api/authorization" + "github.com/openshift/api/build" + "github.com/openshift/api/cloudnetwork" + "github.com/openshift/api/config" + "github.com/openshift/api/console" + "github.com/openshift/api/helm" + "github.com/openshift/api/image" + "github.com/openshift/api/imageregistry" + "github.com/openshift/api/kubecontrolplane" + "github.com/openshift/api/machine" + "github.com/openshift/api/monitoring" + "github.com/openshift/api/network" + "github.com/openshift/api/networkoperator" + 
"github.com/openshift/api/oauth" + "github.com/openshift/api/openshiftcontrolplane" + "github.com/openshift/api/operator" + "github.com/openshift/api/operatorcontrolplane" + "github.com/openshift/api/osin" + "github.com/openshift/api/project" + "github.com/openshift/api/quota" + "github.com/openshift/api/route" + "github.com/openshift/api/samples" + "github.com/openshift/api/security" + "github.com/openshift/api/servicecertsigner" + "github.com/openshift/api/sharedresource" + "github.com/openshift/api/template" + "github.com/openshift/api/user" + + // just make sure this compiles. Don't add it to a scheme + _ "github.com/openshift/api/legacyconfig/v1" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder( + apiserver.Install, + apps.Install, + authorization.Install, + build.Install, + config.Install, + console.Install, + helm.Install, + image.Install, + imageregistry.Install, + kubecontrolplane.Install, + cloudnetwork.Install, + network.Install, + networkoperator.Install, + oauth.Install, + openshiftcontrolplane.Install, + operator.Install, + operatorcontrolplane.Install, + osin.Install, + project.Install, + quota.Install, + route.Install, + samples.Install, + security.Install, + servicecertsigner.Install, + sharedresource.Install, + template.Install, + user.Install, + machine.Install, + monitoring.Install, + ) + // Install is a function which adds every version of every openshift group to a scheme + Install = schemeBuilder.AddToScheme + + kubeSchemeBuilder = runtime.NewSchemeBuilder( + kadmissionv1.AddToScheme, + kadmissionv1beta1.AddToScheme, + kadmissionregistrationv1.AddToScheme, + kadmissionregistrationv1beta1.AddToScheme, + kappsv1.AddToScheme, + kappsv1beta1.AddToScheme, + kappsv1beta2.AddToScheme, + kauthenticationv1.AddToScheme, + kauthenticationv1beta1.AddToScheme, + kauthorizationv1.AddToScheme, + kauthorizationv1beta1.AddToScheme, + kautoscalingv1.AddToScheme, + kautoscalingv2.AddToScheme, + kautoscalingv2beta1.AddToScheme, + kautoscalingv2beta2.AddToScheme, + kbatchv1.AddToScheme, + kbatchv1beta1.AddToScheme, + kcertificatesv1.AddToScheme, + kcertificatesv1beta1.AddToScheme, + kcorev1.AddToScheme, + kcoordinationv1.AddToScheme, + kcoordinationv1beta1.AddToScheme, + keventsv1.AddToScheme, + keventsv1beta1.AddToScheme, + kextensionsv1beta1.AddToScheme, + kflowcontrolv1alpha1.AddToScheme, + kflowcontrolv1beta1.AddToScheme, + kflowcontrolv1beta2.AddToScheme, + kimagepolicyv1alpha1.AddToScheme, + knetworkingv1.AddToScheme, + knetworkingv1beta1.AddToScheme, + knodev1.AddToScheme, + knodev1alpha1.AddToScheme, + knodev1beta1.AddToScheme, + kpolicyv1.AddToScheme, + kpolicyv1beta1.AddToScheme, + krbacv1.AddToScheme, + krbacv1beta1.AddToScheme, + krbacv1alpha1.AddToScheme, + kschedulingv1.AddToScheme, + kschedulingv1alpha1.AddToScheme, + kschedulingv1beta1.AddToScheme, + kstoragev1.AddToScheme, + kstoragev1beta1.AddToScheme, + kstoragev1alpha1.AddToScheme, + ) + // InstallKube is a way to install all the external k8s.io/api types + InstallKube = kubeSchemeBuilder.AddToScheme +) diff --git a/vendor/github.com/openshift/api/kubecontrolplane/.codegen.yaml b/vendor/github.com/openshift/api/kubecontrolplane/.codegen.yaml new file mode 100644 index 000000000..ffa2c8d9b --- /dev/null +++ b/vendor/github.com/openshift/api/kubecontrolplane/.codegen.yaml @@ -0,0 +1,2 @@ +swaggerdocs: + commentPolicy: Warn diff --git a/vendor/github.com/openshift/api/kubecontrolplane/install.go b/vendor/github.com/openshift/api/kubecontrolplane/install.go new file mode 100644 index 000000000..c34b77723 --- 
/dev/null +++ b/vendor/github.com/openshift/api/kubecontrolplane/install.go @@ -0,0 +1,26 @@ +package kubecontrolplane + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + kubecontrolplanev1 "github.com/openshift/api/kubecontrolplane/v1" +) + +const ( + GroupName = "kubecontrolplane.config.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(kubecontrolplanev1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/kubecontrolplane/v1/doc.go b/vendor/github.com/openshift/api/kubecontrolplane/v1/doc.go new file mode 100644 index 000000000..d8872a613 --- /dev/null +++ b/vendor/github.com/openshift/api/kubecontrolplane/v1/doc.go @@ -0,0 +1,7 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=kubecontrolplane.config.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/kubecontrolplane/v1/register.go b/vendor/github.com/openshift/api/kubecontrolplane/v1/register.go new file mode 100644 index 000000000..f8abc8ad8 --- /dev/null +++ b/vendor/github.com/openshift/api/kubecontrolplane/v1/register.go @@ -0,0 +1,38 @@ +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + osinv1 "github.com/openshift/api/osin/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "kubecontrolplane.config.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, osinv1.Install, configv1.Install) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &KubeAPIServerConfig{}, + &KubeControllerManagerConfig{}, + ) + return nil +} diff --git a/vendor/github.com/openshift/api/kubecontrolplane/v1/types.go b/vendor/github.com/openshift/api/kubecontrolplane/v1/types.go new file mode 100644 index 000000000..aae8f464c --- /dev/null +++ b/vendor/github.com/openshift/api/kubecontrolplane/v1/types.go @@ -0,0 +1,219 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "fmt" + + configv1 "github.com/openshift/api/config/v1" + osinv1 "github.com/openshift/api/osin/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. 
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type KubeAPIServerConfig struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// provides the standard apiserver configuration
+	configv1.GenericAPIServerConfig `json:",inline"`
+
+	// authConfig configures authentication options in addition to the standard
+	// oauth token and client certificate authenticators
+	AuthConfig MasterAuthConfig `json:"authConfig"`
+
+	// aggregatorConfig has options for configuring the aggregator component of the API server.
+	AggregatorConfig AggregatorConfig `json:"aggregatorConfig"`
+
+	// kubeletClientInfo contains information about how to connect to kubelets
+	KubeletClientInfo KubeletConnectionInfo `json:"kubeletClientInfo"`
+
+	// servicesSubnet is the subnet to use for assigning service IPs
+	ServicesSubnet string `json:"servicesSubnet"`
+	// servicesNodePortRange is the range to use for assigning service public ports on a host.
+	ServicesNodePortRange string `json:"servicesNodePortRange"`
+
+	// DEPRECATED: consolePublicURL has been deprecated and setting it has no effect.
+	ConsolePublicURL string `json:"consolePublicURL"`
+
+	// UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!
+	// TODO I think we should just drop this feature.
+	UserAgentMatchingConfig UserAgentMatchingConfig `json:"userAgentMatchingConfig"`
+
+	// imagePolicyConfig feeds the image policy admission plugin
+	// TODO make it an admission plugin config
+	ImagePolicyConfig KubeAPIServerImagePolicyConfig `json:"imagePolicyConfig"`
+
+	// projectConfig feeds an admission plugin
+	// TODO make it an admission plugin config
+	ProjectConfig KubeAPIServerProjectConfig `json:"projectConfig"`
+
+	// serviceAccountPublicKeyFiles is a list of files, each containing a PEM-encoded public RSA key.
+	// (If any file contains a private key, the public portion of the key is used)
+	// The list of public keys is used to verify presented service account tokens.
+	// Each key is tried in order until the list is exhausted or verification succeeds.
+	// If no keys are specified, no service account authentication will be available.
+	ServiceAccountPublicKeyFiles []string `json:"serviceAccountPublicKeyFiles"`
+
+	// oauthConfig, if present, starts the /oauth endpoint in this process
+	OAuthConfig *osinv1.OAuthConfig `json:"oauthConfig"`
+
+	// TODO this needs to be removed.
+	APIServerArguments map[string]Arguments `json:"apiServerArguments"`
+}
+
+// Arguments masks the value so protobuf can generate
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+type Arguments []string
+
+func (t Arguments) String() string {
+	return fmt.Sprintf("%v", []string(t))
+}
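APIServerArguments above carries free-form flag overrides keyed by flag name, with Arguments as the repeated-value type. A minimal sketch of constructing one (the flag names and values here are hypothetical examples, not defaults shipped by this package):

package main

import (
	"fmt"

	kubecontrolplanev1 "github.com/openshift/api/kubecontrolplane/v1"
)

func main() {
	// Arguments is just []string, so each key maps to the repeated values a
	// command-line flag would receive; names/values below are illustrative.
	cfg := kubecontrolplanev1.KubeAPIServerConfig{
		APIServerArguments: map[string]kubecontrolplanev1.Arguments{
			"audit-log-path": {"/var/log/kube-apiserver/audit.log"},
			"v":              {"2"},
		},
	}
	fmt.Println(cfg.APIServerArguments)
}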
+type KubeAPIServerImagePolicyConfig struct {
+	// internalRegistryHostname sets the hostname for the default internal image
+	// registry. The value must be in "hostname[:port]" format.
+	// For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY
+	// environment variable but this setting overrides the environment variable.
+	InternalRegistryHostname string `json:"internalRegistryHostname"`
+	// externalRegistryHostnames provides the hostnames for the default external image
+	// registry. The external hostname should be set only when the image registry
+	// is exposed externally. The first value is used in 'publicDockerImageRepository'
+	// field in ImageStreams. The value must be in "hostname[:port]" format.
+	ExternalRegistryHostnames []string `json:"externalRegistryHostnames"`
+}
+
+type KubeAPIServerProjectConfig struct {
+	// defaultNodeSelector holds default project node label selector
+	DefaultNodeSelector string `json:"defaultNodeSelector"`
+}
+
+// KubeletConnectionInfo holds information necessary for connecting to a kubelet
+type KubeletConnectionInfo struct {
+	// port is the port to connect to kubelets on
+	Port uint32 `json:"port"`
+	// ca is the CA for verifying TLS connections to kubelets
+	CA string `json:"ca"`
+	// CertInfo is the TLS client cert information for securing communication to kubelets
+	// this is anonymous so that we can inline it for serialization
+	configv1.CertInfo `json:",inline"`
+}
+
+// UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!
+type UserAgentMatchingConfig struct {
+	// requiredClients if this list is non-empty, then a User-Agent must match one of the UserAgentRegexes to be allowed
+	RequiredClients []UserAgentMatchRule `json:"requiredClients"`
+
+	// deniedClients if this list is non-empty, then a User-Agent must not match any of the UserAgentRegexes
+	DeniedClients []UserAgentDenyRule `json:"deniedClients"`
+
+	// defaultRejectionMessage is the message shown when rejecting a client. If it is not set, a generic message is given.
+	DefaultRejectionMessage string `json:"defaultRejectionMessage"`
+}
+
+// UserAgentMatchRule describes how to match a given request based on User-Agent and HTTPVerb
+type UserAgentMatchRule struct {
+	// regex is a regex that is checked against the User-Agent.
+	// Known variants of oc clients
+	// 1. oc accessing kube resources: oc/v1.2.0 (linux/amd64) kubernetes/bc4550d
+	// 2. oc accessing openshift resources: oc/v1.1.3 (linux/amd64) openshift/b348c2f
+	// 3. openshift kubectl accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d
+	// 4. openshift kubectl accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f
+	// 5. oadm accessing kube resources: oadm/v1.2.0 (linux/amd64) kubernetes/bc4550d
+	// 6. oadm accessing openshift resources: oadm/v1.1.3 (linux/amd64) openshift/b348c2f
+	// 7. openshift cli accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d
+	// 8. openshift cli accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f
+	Regex string `json:"regex"`
+
+	// httpVerbs specifies which HTTP verbs should be matched. An empty list means "match all verbs".
+	HTTPVerbs []string `json:"httpVerbs"`
+}
+
+// UserAgentDenyRule adds a rejection message that can be used to help a user figure out how to get an approved client
+type UserAgentDenyRule struct {
+	UserAgentMatchRule `json:",inline"`
+
+	// RejectionMessage is the message shown when rejecting a client. If it is not set, the default message is used.
+	RejectionMessage string `json:"rejectionMessage"`
+}
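A deny rule simply composes a match rule with a custom message. A hedged sketch of how these types fit together (the regex and messages are hypothetical, patterned on the User-Agent variants listed above):

package main

import (
	"fmt"

	kubecontrolplanev1 "github.com/openshift/api/kubecontrolplane/v1"
)

func main() {
	// Deny hypothetical 1.1.x openshift clients on writes only; the regex and
	// messages are illustrative, not shipped defaults.
	deny := kubecontrolplanev1.UserAgentDenyRule{
		UserAgentMatchRule: kubecontrolplanev1.UserAgentMatchRule{
			Regex:     `\w+/v1\.1\.\d+ \(.+/.+\) openshift/\w{7}`,
			HTTPVerbs: []string{"POST", "PUT", "DELETE", "PATCH"},
		},
		RejectionMessage: "this client version is no longer supported; please upgrade",
	}
	cfg := kubecontrolplanev1.UserAgentMatchingConfig{
		DeniedClients:           []kubecontrolplanev1.UserAgentDenyRule{deny},
		DefaultRejectionMessage: "request rejected by User-Agent policy",
	}
	fmt.Printf("%+v\n", cfg)
}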
+
+// MasterAuthConfig configures authentication options in addition to the standard
+// oauth token and client certificate authenticators
+type MasterAuthConfig struct {
+	// requestHeader holds options for setting up a front proxy against the API. It is optional.
+	RequestHeader *RequestHeaderAuthenticationOptions `json:"requestHeader"`
+	// webhookTokenAuthenticators, if present, configures remote token reviewers
+	WebhookTokenAuthenticators []WebhookTokenAuthenticator `json:"webhookTokenAuthenticators"`
+	// oauthMetadataFile is a path to a file containing the discovery endpoint for OAuth 2.0 Authorization
+	// Server Metadata for an external OAuth server.
+	// See IETF Draft: // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2
+	// This option is mutually exclusive with OAuthConfig
+	OAuthMetadataFile string `json:"oauthMetadataFile"`
+}
+
+// WebhookTokenAuthenticators holds the necessary configuration options for
+// external token authenticators
+type WebhookTokenAuthenticator struct {
+	// configFile is a path to a Kubeconfig file with the webhook configuration
+	ConfigFile string `json:"configFile"`
+	// cacheTTL indicates how long an authentication result should be cached.
+	// It takes a valid time duration string (e.g. "5m").
+	// If empty, you get a default timeout of 2 minutes.
+	// If zero (e.g. "0m"), caching is disabled
+	CacheTTL string `json:"cacheTTL"`
+}
+
+// RequestHeaderAuthenticationOptions provides options for setting up a front proxy against the entire
+// API instead of against the /oauth endpoint.
+type RequestHeaderAuthenticationOptions struct {
+	// clientCA is a file with the trusted signer certs. It is required.
+	ClientCA string `json:"clientCA"`
+	// clientCommonNames is a required list of common names to require a match from.
+	ClientCommonNames []string `json:"clientCommonNames"`
+
+	// usernameHeaders is the list of headers to check for user information. First hit wins.
+	UsernameHeaders []string `json:"usernameHeaders"`
+	// groupHeaders is the set of headers to check for group information. All are unioned.
+	GroupHeaders []string `json:"groupHeaders"`
+	// extraHeaderPrefixes is the set of request header prefixes to inspect for user extra. X-Remote-Extra- is suggested.
+	ExtraHeaderPrefixes []string `json:"extraHeaderPrefixes"`
+}
+
+// AggregatorConfig holds information required to make the aggregator function.
+type AggregatorConfig struct {
+	// proxyClientInfo specifies the client cert/key to use when proxying to aggregated API servers
+	ProxyClientInfo configv1.CertInfo `json:"proxyClientInfo"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type KubeControllerManagerConfig struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// serviceServingCert provides support for the old alpha service serving cert signer CA bundle
+	ServiceServingCert ServiceServingCert `json:"serviceServingCert"`
+
+	// projectConfig is an optimization for the daemonset controller
+	ProjectConfig KubeControllerManagerProjectConfig `json:"projectConfig"`
+
+	// extendedArguments is used to configure the kube-controller-manager
+	ExtendedArguments map[string]Arguments `json:"extendedArguments"`
+}
+
+type KubeControllerManagerProjectConfig struct {
+	// defaultNodeSelector holds default project node label selector
+	DefaultNodeSelector string `json:"defaultNodeSelector"`
+}
+
+// ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for
+// pods fulfilling a service to serve with.
+type ServiceServingCert struct { + // CertFile is a file containing a PEM-encoded certificate + CertFile string `json:"certFile"` +} diff --git a/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..e4378aa52 --- /dev/null +++ b/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.deepcopy.go @@ -0,0 +1,379 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + osinv1 "github.com/openshift/api/osin/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AggregatorConfig) DeepCopyInto(out *AggregatorConfig) { + *out = *in + out.ProxyClientInfo = in.ProxyClientInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregatorConfig. +func (in *AggregatorConfig) DeepCopy() *AggregatorConfig { + if in == nil { + return nil + } + out := new(AggregatorConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Arguments) DeepCopyInto(out *Arguments) { + { + in := &in + *out = make(Arguments, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Arguments. +func (in Arguments) DeepCopy() Arguments { + if in == nil { + return nil + } + out := new(Arguments) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAPIServerConfig) DeepCopyInto(out *KubeAPIServerConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.GenericAPIServerConfig.DeepCopyInto(&out.GenericAPIServerConfig) + in.AuthConfig.DeepCopyInto(&out.AuthConfig) + out.AggregatorConfig = in.AggregatorConfig + out.KubeletClientInfo = in.KubeletClientInfo + in.UserAgentMatchingConfig.DeepCopyInto(&out.UserAgentMatchingConfig) + in.ImagePolicyConfig.DeepCopyInto(&out.ImagePolicyConfig) + out.ProjectConfig = in.ProjectConfig + if in.ServiceAccountPublicKeyFiles != nil { + in, out := &in.ServiceAccountPublicKeyFiles, &out.ServiceAccountPublicKeyFiles + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.OAuthConfig != nil { + in, out := &in.OAuthConfig, &out.OAuthConfig + *out = new(osinv1.OAuthConfig) + (*in).DeepCopyInto(*out) + } + if in.APIServerArguments != nil { + in, out := &in.APIServerArguments, &out.APIServerArguments + *out = make(map[string]Arguments, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(Arguments, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerConfig. +func (in *KubeAPIServerConfig) DeepCopy() *KubeAPIServerConfig { + if in == nil { + return nil + } + out := new(KubeAPIServerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *KubeAPIServerConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAPIServerImagePolicyConfig) DeepCopyInto(out *KubeAPIServerImagePolicyConfig) { + *out = *in + if in.ExternalRegistryHostnames != nil { + in, out := &in.ExternalRegistryHostnames, &out.ExternalRegistryHostnames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerImagePolicyConfig. +func (in *KubeAPIServerImagePolicyConfig) DeepCopy() *KubeAPIServerImagePolicyConfig { + if in == nil { + return nil + } + out := new(KubeAPIServerImagePolicyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeAPIServerProjectConfig) DeepCopyInto(out *KubeAPIServerProjectConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeAPIServerProjectConfig. +func (in *KubeAPIServerProjectConfig) DeepCopy() *KubeAPIServerProjectConfig { + if in == nil { + return nil + } + out := new(KubeAPIServerProjectConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeControllerManagerConfig) DeepCopyInto(out *KubeControllerManagerConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ServiceServingCert = in.ServiceServingCert + out.ProjectConfig = in.ProjectConfig + if in.ExtendedArguments != nil { + in, out := &in.ExtendedArguments, &out.ExtendedArguments + *out = make(map[string]Arguments, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(Arguments, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerConfig. +func (in *KubeControllerManagerConfig) DeepCopy() *KubeControllerManagerConfig { + if in == nil { + return nil + } + out := new(KubeControllerManagerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KubeControllerManagerConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeControllerManagerProjectConfig) DeepCopyInto(out *KubeControllerManagerProjectConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeControllerManagerProjectConfig. +func (in *KubeControllerManagerProjectConfig) DeepCopy() *KubeControllerManagerProjectConfig { + if in == nil { + return nil + } + out := new(KubeControllerManagerProjectConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubeletConnectionInfo) DeepCopyInto(out *KubeletConnectionInfo) { + *out = *in + out.CertInfo = in.CertInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConnectionInfo. +func (in *KubeletConnectionInfo) DeepCopy() *KubeletConnectionInfo { + if in == nil { + return nil + } + out := new(KubeletConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterAuthConfig) DeepCopyInto(out *MasterAuthConfig) { + *out = *in + if in.RequestHeader != nil { + in, out := &in.RequestHeader, &out.RequestHeader + *out = new(RequestHeaderAuthenticationOptions) + (*in).DeepCopyInto(*out) + } + if in.WebhookTokenAuthenticators != nil { + in, out := &in.WebhookTokenAuthenticators, &out.WebhookTokenAuthenticators + *out = make([]WebhookTokenAuthenticator, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterAuthConfig. +func (in *MasterAuthConfig) DeepCopy() *MasterAuthConfig { + if in == nil { + return nil + } + out := new(MasterAuthConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestHeaderAuthenticationOptions) DeepCopyInto(out *RequestHeaderAuthenticationOptions) { + *out = *in + if in.ClientCommonNames != nil { + in, out := &in.ClientCommonNames, &out.ClientCommonNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.UsernameHeaders != nil { + in, out := &in.UsernameHeaders, &out.UsernameHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.GroupHeaders != nil { + in, out := &in.GroupHeaders, &out.GroupHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExtraHeaderPrefixes != nil { + in, out := &in.ExtraHeaderPrefixes, &out.ExtraHeaderPrefixes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderAuthenticationOptions. +func (in *RequestHeaderAuthenticationOptions) DeepCopy() *RequestHeaderAuthenticationOptions { + if in == nil { + return nil + } + out := new(RequestHeaderAuthenticationOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceServingCert) DeepCopyInto(out *ServiceServingCert) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceServingCert. +func (in *ServiceServingCert) DeepCopy() *ServiceServingCert { + if in == nil { + return nil + } + out := new(ServiceServingCert) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserAgentDenyRule) DeepCopyInto(out *UserAgentDenyRule) { + *out = *in + in.UserAgentMatchRule.DeepCopyInto(&out.UserAgentMatchRule) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserAgentDenyRule. 
+func (in *UserAgentDenyRule) DeepCopy() *UserAgentDenyRule {
+	if in == nil {
+		return nil
+	}
+	out := new(UserAgentDenyRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserAgentMatchRule) DeepCopyInto(out *UserAgentMatchRule) {
+	*out = *in
+	if in.HTTPVerbs != nil {
+		in, out := &in.HTTPVerbs, &out.HTTPVerbs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserAgentMatchRule.
+func (in *UserAgentMatchRule) DeepCopy() *UserAgentMatchRule {
+	if in == nil {
+		return nil
+	}
+	out := new(UserAgentMatchRule)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UserAgentMatchingConfig) DeepCopyInto(out *UserAgentMatchingConfig) {
+	*out = *in
+	if in.RequiredClients != nil {
+		in, out := &in.RequiredClients, &out.RequiredClients
+		*out = make([]UserAgentMatchRule, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.DeniedClients != nil {
+		in, out := &in.DeniedClients, &out.DeniedClients
+		*out = make([]UserAgentDenyRule, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserAgentMatchingConfig.
+func (in *UserAgentMatchingConfig) DeepCopy() *UserAgentMatchingConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(UserAgentMatchingConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *WebhookTokenAuthenticator) DeepCopyInto(out *WebhookTokenAuthenticator) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookTokenAuthenticator.
+func (in *WebhookTokenAuthenticator) DeepCopy() *WebhookTokenAuthenticator {
+	if in == nil {
+		return nil
+	}
+	out := new(WebhookTokenAuthenticator)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 000000000..3ee8b23fd
--- /dev/null
+++ b/vendor/github.com/openshift/api/kubecontrolplane/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,161 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple lines or blocks that you want to ignore, use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
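These generated maps are what documentation tooling reads back at runtime. A minimal sketch of consuming them from outside the package (a hypothetical standalone program, not part of this patch):

package main

import (
	"fmt"

	kubecontrolplanev1 "github.com/openshift/api/kubecontrolplane/v1"
)

func main() {
	// SwaggerDoc returns a field-name -> description map; the empty key
	// holds the type-level description.
	docs := kubecontrolplanev1.KubeAPIServerConfig{}.SwaggerDoc()
	fmt.Println(docs[""])
	fmt.Println(docs["servicesSubnet"])
}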
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_AggregatorConfig = map[string]string{
+	"":                "AggregatorConfig holds information required to make the aggregator function.",
+	"proxyClientInfo": "proxyClientInfo specifies the client cert/key to use when proxying to aggregated API servers",
+}
+
+func (AggregatorConfig) SwaggerDoc() map[string]string {
+	return map_AggregatorConfig
+}
+
+var map_KubeAPIServerConfig = map[string]string{
+	"":                             "Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+	"authConfig":                   "authConfig configures authentication options in addition to the standard oauth token and client certificate authenticators",
+	"aggregatorConfig":             "aggregatorConfig has options for configuring the aggregator component of the API server.",
+	"kubeletClientInfo":            "kubeletClientInfo contains information about how to connect to kubelets",
+	"servicesSubnet":               "servicesSubnet is the subnet to use for assigning service IPs",
+	"servicesNodePortRange":        "servicesNodePortRange is the range to use for assigning service public ports on a host.",
+	"consolePublicURL":             "DEPRECATED: consolePublicURL has been deprecated and setting it has no effect.",
+	"userAgentMatchingConfig":      "UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!",
+	"imagePolicyConfig":            "imagePolicyConfig feeds the image policy admission plugin",
+	"projectConfig":                "projectConfig feeds an admission plugin",
+	"serviceAccountPublicKeyFiles": "serviceAccountPublicKeyFiles is a list of files, each containing a PEM-encoded public RSA key. (If any file contains a private key, the public portion of the key is used) The list of public keys is used to verify presented service account tokens. Each key is tried in order until the list is exhausted or verification succeeds. If no keys are specified, no service account authentication will be available.",
+	"oauthConfig":                  "oauthConfig, if present, starts the /oauth endpoint in this process",
+}
+
+func (KubeAPIServerConfig) SwaggerDoc() map[string]string {
+	return map_KubeAPIServerConfig
+}
+
+var map_KubeAPIServerImagePolicyConfig = map[string]string{
+	"internalRegistryHostname":  "internalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this setting overrides the environment variable.",
+	"externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.",
+}
The value must be in \"hostname[:port]\" format.", +} + +func (KubeAPIServerImagePolicyConfig) SwaggerDoc() map[string]string { + return map_KubeAPIServerImagePolicyConfig +} + +var map_KubeAPIServerProjectConfig = map[string]string{ + "defaultNodeSelector": "defaultNodeSelector holds default project node label selector", +} + +func (KubeAPIServerProjectConfig) SwaggerDoc() map[string]string { + return map_KubeAPIServerProjectConfig +} + +var map_KubeControllerManagerConfig = map[string]string{ + "": "Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "serviceServingCert": "serviceServingCert provides support for the old alpha service serving cert signer CA bundle", + "projectConfig": "projectConfig is an optimization for the daemonset controller", + "extendedArguments": "extendedArguments is used to configure the kube-controller-manager", +} + +func (KubeControllerManagerConfig) SwaggerDoc() map[string]string { + return map_KubeControllerManagerConfig +} + +var map_KubeControllerManagerProjectConfig = map[string]string{ + "defaultNodeSelector": "defaultNodeSelector holds default project node label selector", +} + +func (KubeControllerManagerProjectConfig) SwaggerDoc() map[string]string { + return map_KubeControllerManagerProjectConfig +} + +var map_KubeletConnectionInfo = map[string]string{ + "": "KubeletConnectionInfo holds information necessary for connecting to a kubelet", + "port": "port is the port to connect to kubelets on", + "ca": "ca is the CA for verifying TLS connections to kubelets", +} + +func (KubeletConnectionInfo) SwaggerDoc() map[string]string { + return map_KubeletConnectionInfo +} + +var map_MasterAuthConfig = map[string]string{ + "": "MasterAuthConfig configures authentication options in addition to the standard oauth token and client certificate authenticators", + "requestHeader": "requestHeader holds options for setting up a front proxy against the API. It is optional.", + "webhookTokenAuthenticators": "webhookTokenAuthenticators, if present configures remote token reviewers", + "oauthMetadataFile": "oauthMetadataFile is a path to a file containing the discovery endpoint for OAuth 2.0 Authorization Server Metadata for an external OAuth server. See IETF Draft: // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This option is mutually exclusive with OAuthConfig", +} + +func (MasterAuthConfig) SwaggerDoc() map[string]string { + return map_MasterAuthConfig +} + +var map_RequestHeaderAuthenticationOptions = map[string]string{ + "": "RequestHeaderAuthenticationOptions provides options for setting up a front proxy against the entire API instead of against the /oauth endpoint.", + "clientCA": "clientCA is a file with the trusted signer certs. It is required.", + "clientCommonNames": "clientCommonNames is a required list of common names to require a match from.", + "usernameHeaders": "usernameHeaders is the list of headers to check for user information. First hit wins.", + "groupHeaders": "groupHeaders is the set of headers to check for group information. All are unioned.", + "extraHeaderPrefixes": "extraHeaderPrefixes is the set of request header prefixes to inspect for user extra. 
+
+var map_RequestHeaderAuthenticationOptions = map[string]string{
+	"":                    "RequestHeaderAuthenticationOptions provides options for setting up a front proxy against the entire API instead of against the /oauth endpoint.",
+	"clientCA":            "clientCA is a file with the trusted signer certs. It is required.",
+	"clientCommonNames":   "clientCommonNames is a required list of common names to require a match from.",
+	"usernameHeaders":     "usernameHeaders is the list of headers to check for user information. First hit wins.",
+	"groupHeaders":        "groupHeaders is the set of headers to check for group information. All are unioned.",
+	"extraHeaderPrefixes": "extraHeaderPrefixes is the set of request header prefixes to inspect for user extra. X-Remote-Extra- is suggested.",
+}
+
+func (RequestHeaderAuthenticationOptions) SwaggerDoc() map[string]string {
+	return map_RequestHeaderAuthenticationOptions
+}
+
+var map_ServiceServingCert = map[string]string{
+	"":         "ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for pods fulfilling a service to serve with.",
+	"certFile": "CertFile is a file containing a PEM-encoded certificate",
+}
+
+func (ServiceServingCert) SwaggerDoc() map[string]string {
+	return map_ServiceServingCert
+}
+
+var map_UserAgentDenyRule = map[string]string{
+	"":                 "UserAgentDenyRule adds a rejection message that can be used to help a user figure out how to get an approved client",
+	"rejectionMessage": "RejectionMessage is the message shown when rejecting a client. If it is not set, the default message is used.",
+}
+
+func (UserAgentDenyRule) SwaggerDoc() map[string]string {
+	return map_UserAgentDenyRule
+}
+
+var map_UserAgentMatchRule = map[string]string{
+	"":          "UserAgentMatchRule describes how to match a given request based on User-Agent and HTTPVerb",
+	"regex":     "regex is a regex that is checked against the User-Agent. Known variants of oc clients 1. oc accessing kube resources: oc/v1.2.0 (linux/amd64) kubernetes/bc4550d 2. oc accessing openshift resources: oc/v1.1.3 (linux/amd64) openshift/b348c2f 3. openshift kubectl accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d 4. openshift kubectl accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f 5. oadm accessing kube resources: oadm/v1.2.0 (linux/amd64) kubernetes/bc4550d 6. oadm accessing openshift resources: oadm/v1.1.3 (linux/amd64) openshift/b348c2f 7. openshift cli accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d 8. openshift cli accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f",
+	"httpVerbs": "httpVerbs specifies which HTTP verbs should be matched. An empty list means \"match all verbs\".",
+}
+
+func (UserAgentMatchRule) SwaggerDoc() map[string]string {
+	return map_UserAgentMatchRule
+}
+
+var map_UserAgentMatchingConfig = map[string]string{
+	"":                        "UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!",
+	"requiredClients":         "requiredClients if this list is non-empty, then a User-Agent must match one of the UserAgentRegexes to be allowed",
+	"deniedClients":           "deniedClients if this list is non-empty, then a User-Agent must not match any of the UserAgentRegexes",
+	"defaultRejectionMessage": "defaultRejectionMessage is the message shown when rejecting a client. If it is not set, a generic message is given.",
+}
+
+func (UserAgentMatchingConfig) SwaggerDoc() map[string]string {
+	return map_UserAgentMatchingConfig
+}
+
+var map_WebhookTokenAuthenticator = map[string]string{
+	"":           "WebhookTokenAuthenticators holds the necessary configuration options for external token authenticators",
+	"configFile": "configFile is a path to a Kubeconfig file with the webhook configuration",
+	"cacheTTL":   "cacheTTL indicates how long an authentication result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get a default timeout of 2 minutes. If zero (e.g. 
\"0m\"), caching is disabled", +} + +func (WebhookTokenAuthenticator) SwaggerDoc() map[string]string { + return map_WebhookTokenAuthenticator +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/doc.go b/vendor/github.com/openshift/api/legacyconfig/v1/doc.go new file mode 100644 index 000000000..93fc6dc50 --- /dev/null +++ b/vendor/github.com/openshift/api/legacyconfig/v1/doc.go @@ -0,0 +1,7 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=legacy.config.openshift.io +// Package v1 is deprecated and exists to ease a transition to current APIs +package v1 diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/register.go b/vendor/github.com/openshift/api/legacyconfig/v1/register.go new file mode 100644 index 000000000..8ba752521 --- /dev/null +++ b/vendor/github.com/openshift/api/legacyconfig/v1/register.go @@ -0,0 +1,46 @@ +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + // Legacy is the 'v1' apiVersion of config + LegacyGroupName = "" + GroupVersion = schema.GroupVersion{Group: LegacyGroupName, Version: "v1"} + LegacySchemeGroupVersion = GroupVersion + legacySchemeBuilder = runtime.NewSchemeBuilder( + addKnownTypesToLegacy, + ) + InstallLegacy = legacySchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypesToLegacy(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(LegacySchemeGroupVersion, + &MasterConfig{}, + &NodeConfig{}, + &SessionSecrets{}, + + &BasicAuthPasswordIdentityProvider{}, + &AllowAllPasswordIdentityProvider{}, + &DenyAllPasswordIdentityProvider{}, + &HTPasswdPasswordIdentityProvider{}, + &LDAPPasswordIdentityProvider{}, + &KeystonePasswordIdentityProvider{}, + &RequestHeaderIdentityProvider{}, + &GitHubIdentityProvider{}, + &GitLabIdentityProvider{}, + &GoogleIdentityProvider{}, + &OpenIDIdentityProvider{}, + + &LDAPSyncConfig{}, + + &DefaultAdmissionConfig{}, + + &BuildDefaultsConfig{}, + &BuildOverridesConfig{}, + ) + return nil +} diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/serialization.go b/vendor/github.com/openshift/api/legacyconfig/v1/serialization.go new file mode 100644 index 000000000..145074273 --- /dev/null +++ b/vendor/github.com/openshift/api/legacyconfig/v1/serialization.go @@ -0,0 +1,87 @@ +package v1 + +import "k8s.io/apimachinery/pkg/runtime" + +var _ runtime.NestedObjectDecoder = &MasterConfig{} + +// DecodeNestedObjects handles encoding RawExtensions on the MasterConfig, ensuring the +// objects are decoded with the provided decoder. +func (c *MasterConfig) DecodeNestedObjects(d runtime.Decoder) error { + // decoding failures result in a runtime.Unknown object being created in Object and passed + // to conversion + for k, v := range c.AdmissionConfig.PluginConfig { + DecodeNestedRawExtensionOrUnknown(d, &v.Configuration) + c.AdmissionConfig.PluginConfig[k] = v + } + if c.OAuthConfig != nil { + for i := range c.OAuthConfig.IdentityProviders { + DecodeNestedRawExtensionOrUnknown(d, &c.OAuthConfig.IdentityProviders[i].Provider) + } + } + DecodeNestedRawExtensionOrUnknown(d, &c.AuditConfig.PolicyConfiguration) + return nil +} + +var _ runtime.NestedObjectEncoder = &MasterConfig{} + +// EncodeNestedObjects handles encoding RawExtensions on the MasterConfig, ensuring the +// objects are encoded with the provided encoder. 
+func (c *MasterConfig) EncodeNestedObjects(e runtime.Encoder) error { + for k, v := range c.AdmissionConfig.PluginConfig { + if err := EncodeNestedRawExtension(e, &v.Configuration); err != nil { + return err + } + c.AdmissionConfig.PluginConfig[k] = v + } + if c.OAuthConfig != nil { + for i := range c.OAuthConfig.IdentityProviders { + if err := EncodeNestedRawExtension(e, &c.OAuthConfig.IdentityProviders[i].Provider); err != nil { + return err + } + } + } + if err := EncodeNestedRawExtension(e, &c.AuditConfig.PolicyConfiguration); err != nil { + return err + } + return nil +} + +// DecodeNestedRawExtensionOrUnknown decodes the raw extension with the given decoder when +// possible, falling back to a runtime.Unknown carrying the raw bytes when the type is not registered. +func DecodeNestedRawExtensionOrUnknown(d runtime.Decoder, ext *runtime.RawExtension) { + if ext.Raw == nil || ext.Object != nil { + return + } + obj, gvk, err := d.Decode(ext.Raw, nil, nil) + if err != nil { + unk := &runtime.Unknown{Raw: ext.Raw} + if runtime.IsNotRegisteredError(err) { + if _, gvk, err := d.Decode(ext.Raw, nil, unk); err == nil { + unk.APIVersion = gvk.GroupVersion().String() + unk.Kind = gvk.Kind + ext.Object = unk + return + } + } + // TODO: record mime-type with the object + if gvk != nil { + unk.APIVersion = gvk.GroupVersion().String() + unk.Kind = gvk.Kind + } + obj = unk + } + ext.Object = obj +} + +// EncodeNestedRawExtension encodes the object held in the RawExtension into its Raw form +// when Raw is unset, returning an error if encoding fails. +func EncodeNestedRawExtension(e runtime.Encoder, ext *runtime.RawExtension) error { + if ext.Raw != nil || ext.Object == nil { + return nil + } + data, err := runtime.Encode(e, ext.Object) + if err != nil { + return err + } + ext.Raw = data + return nil +} diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/stringsource.go b/vendor/github.com/openshift/api/legacyconfig/v1/stringsource.go new file mode 100644 index 000000000..6a5718c1d --- /dev/null +++ b/vendor/github.com/openshift/api/legacyconfig/v1/stringsource.go @@ -0,0 +1,31 @@ +package v1 + +import "encoding/json" + +// UnmarshalJSON implements the json.Unmarshaler interface. +// If the value is a string, it sets the Value field of the StringSource. +// Otherwise, it is unmarshaled into the StringSourceSpec struct. +func (s *StringSource) UnmarshalJSON(value []byte) error { + // If we can unmarshal to a simple string, just set the value + var simpleValue string + if err := json.Unmarshal(value, &simpleValue); err == nil { + s.Value = simpleValue + return nil + } + + // Otherwise do the full struct unmarshal + return json.Unmarshal(value, &s.StringSourceSpec) +} + +// MarshalJSON implements the json.Marshaler interface. +// If the StringSource contains only a string Value (or is empty), it is marshaled as a JSON string. +// Otherwise, the StringSourceSpec struct is marshaled as a JSON object.
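+// +// Illustrative round trip, assuming the StringSourceSpec fields defined in this package (the object form serializes every spec field): +// +//	json.Marshal(StringSource{StringSourceSpec{Value: "secret"}}) // yields the JSON string "secret" +//	json.Marshal(StringSource{StringSourceSpec{File: "/etc/pw"}}) // yields a JSON object of the spec fields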
+func (s *StringSource) MarshalJSON() ([]byte, error) { + // If we have only a cleartext value set, do a simple string marshal + if s.StringSourceSpec == (StringSourceSpec{Value: s.Value}) { + return json.Marshal(s.Value) + } + + // Otherwise do the full struct marshal of the externalized bits + return json.Marshal(s.StringSourceSpec) +} diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/types.go b/vendor/github.com/openshift/api/legacyconfig/v1/types.go new file mode 100644 index 000000000..871eadd8b --- /dev/null +++ b/vendor/github.com/openshift/api/legacyconfig/v1/types.go @@ -0,0 +1,1599 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + buildv1 "github.com/openshift/api/build/v1" +) + +type ExtendedArguments map[string][]string + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeConfig is the fully specified config starting an OpenShift node +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type NodeConfig struct { + metav1.TypeMeta `json:",inline"` + + // NodeName is the value used to identify this particular node in the cluster. If possible, this should be your fully qualified hostname. + // If you're describing a set of static nodes to the master, this value must match one of the values in the list + NodeName string `json:"nodeName"` + + // Node may have multiple IPs, specify the IP to use for pod traffic routing + // If not specified, network parse/lookup on the nodeName is performed and the first non-loopback address is used + NodeIP string `json:"nodeIP"` + + // ServingInfo describes how to start serving + ServingInfo ServingInfo `json:"servingInfo"` + + // MasterKubeConfig is a filename for the .kubeconfig file that describes how to connect this node to the master + MasterKubeConfig string `json:"masterKubeConfig"` + + // MasterClientConnectionOverrides provides overrides to the client connection used to connect to the master. + MasterClientConnectionOverrides *ClientConnectionOverrides `json:"masterClientConnectionOverrides"` + + // DNSDomain holds the domain suffix that will be used for the DNS search path inside each container. Defaults to + // 'cluster.local'. + DNSDomain string `json:"dnsDomain"` + + // DNSIP is the IP address that pods will use to access cluster DNS. Defaults to the service IP of the Kubernetes + // master. This IP must be listening on port 53 for compatibility with libc resolvers (which cannot be configured + // to resolve names from any other port). When running more complex local DNS configurations, this is often set + // to the local address of a DNS proxy like dnsmasq, which then will consult either the local DNS (see + // dnsBindAddress) or the master DNS. + DNSIP string `json:"dnsIP"` + + // DNSBindAddress is the ip:port to serve DNS on. If this is not set, the DNS server will not be started. + // Because most DNS resolvers will only listen on port 53, if you select an alternative port you will need + // a DNS proxy like dnsmasq to answer queries for containers. 
A common configuration is dnsmasq configured + // on a node IP listening on 53 and delegating queries for dnsDomain to this process, while sending other + // queries to the host environment's nameservers. + DNSBindAddress string `json:"dnsBindAddress"` + + // DNSNameservers is a list of ip:port values of recursive nameservers to forward queries to when running + // a local DNS server if dnsBindAddress is set. If this value is empty, the DNS server will default to + // the nameservers listed in /etc/resolv.conf. If you have configured dnsmasq or another DNS proxy on the + // system, this value should be set to the upstream nameservers dnsmasq resolves with. + DNSNameservers []string `json:"dnsNameservers"` + + // DNSRecursiveResolvConf is a path to a resolv.conf file that contains settings for an upstream server. + // Only the nameservers and port fields are used. The file must exist and parse correctly. It adds extra + // nameservers to DNSNameservers if set. + DNSRecursiveResolvConf string `json:"dnsRecursiveResolvConf"` + + // Deprecated and maintained for backward compatibility, use NetworkConfig.NetworkPluginName instead + DeprecatedNetworkPluginName string `json:"networkPluginName,omitempty"` + + // NetworkConfig provides network options for the node + NetworkConfig NodeNetworkConfig `json:"networkConfig"` + + // VolumeDirectory is the directory that volumes will be stored under + VolumeDirectory string `json:"volumeDirectory"` + + // ImageConfig holds options that describe how to build image names for system components + ImageConfig ImageConfig `json:"imageConfig"` + + // AllowDisabledDocker, if true, instructs the Kubelet to ignore errors from Docker. This means that a node can start on a machine that doesn't have docker started. + AllowDisabledDocker bool `json:"allowDisabledDocker"` + + // PodManifestConfig holds the configuration for enabling the Kubelet to + // create pods based on manifest file(s) placed locally on the node + PodManifestConfig *PodManifestConfig `json:"podManifestConfig"` + + // AuthConfig holds authn/authz configuration options + AuthConfig NodeAuthConfig `json:"authConfig"` + + // DockerConfig holds Docker related configuration options. + DockerConfig DockerConfig `json:"dockerConfig"` + + // KubeletArguments are key value pairs that will be passed directly to the Kubelet that match the Kubelet's + // command line arguments. These are not migrated or validated, so if you use them they may become invalid. + // These values override other settings in NodeConfig which may cause invalid configurations. + KubeletArguments ExtendedArguments `json:"kubeletArguments,omitempty"` + + // ProxyArguments are key value pairs that will be passed directly to the Proxy that match the Proxy's + // command line arguments. These are not migrated or validated, so if you use them they may become invalid. + // These values override other settings in NodeConfig which may cause invalid configurations. + ProxyArguments ExtendedArguments `json:"proxyArguments,omitempty"` + + // IPTablesSyncPeriod is how often iptables rules are refreshed + IPTablesSyncPeriod string `json:"iptablesSyncPeriod"` + + // EnableUnidling controls whether or not the hybrid unidling proxy will be set up + EnableUnidling *bool `json:"enableUnidling"` + + // VolumeConfig contains options for configuring volumes on the node. + VolumeConfig NodeVolumeConfig `json:"volumeConfig"` +} + +// NodeVolumeConfig contains options for configuring volumes on the node.
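+// +// An illustrative volumeConfig stanza for this type (the quota value is hypothetical): +// +//	volumeConfig: +//	  localQuota: +//	    perFSGroup: 512Mi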
+type NodeVolumeConfig struct { + // LocalQuota contains options for controlling local volume quota on the node. + LocalQuota LocalQuota `json:"localQuota"` +} + +// MasterVolumeConfig contains options for configuring volume plugins in the master node. +type MasterVolumeConfig struct { + // DynamicProvisioningEnabled is a boolean that toggles dynamic provisioning off when false; defaults to true + DynamicProvisioningEnabled *bool `json:"dynamicProvisioningEnabled"` +} + +// LocalQuota contains options for controlling local volume quota on the node. +type LocalQuota struct { + // FSGroup can be specified to enable a quota on local storage use per unique FSGroup ID. + // At present this is only implemented for emptyDir volumes, and if the underlying + // volumeDirectory is on an XFS filesystem. + PerFSGroup *resource.Quantity `json:"perFSGroup"` +} + +// NodeAuthConfig holds authn/authz configuration options +type NodeAuthConfig struct { + // AuthenticationCacheTTL indicates how long an authentication result should be cached. + // It takes a valid time duration string (e.g. "5m"). If empty, you get the default timeout. If zero (e.g. "0m"), caching is disabled + AuthenticationCacheTTL string `json:"authenticationCacheTTL"` + + // AuthenticationCacheSize indicates how many authentication results should be cached. If 0, the default cache size is used. + AuthenticationCacheSize int `json:"authenticationCacheSize"` + + // AuthorizationCacheTTL indicates how long an authorization result should be cached. + // It takes a valid time duration string (e.g. "5m"). If empty, you get the default timeout. If zero (e.g. "0m"), caching is disabled + AuthorizationCacheTTL string `json:"authorizationCacheTTL"` + + // AuthorizationCacheSize indicates how many authorization results should be cached. If 0, the default cache size is used. + AuthorizationCacheSize int `json:"authorizationCacheSize"` +} + +// NodeNetworkConfig provides network options for the node +type NodeNetworkConfig struct { + // NetworkPluginName is a string specifying the networking plugin + NetworkPluginName string `json:"networkPluginName"` + // Maximum transmission unit for the network packets + MTU uint32 `json:"mtu"` +} + +// DockerConfig holds Docker related configuration options. +type DockerConfig struct { + // ExecHandlerName is the name of the handler to use for executing + // commands in containers. + ExecHandlerName DockerExecHandlerType `json:"execHandlerName"` + // DockerShimSocket is the location of the dockershim socket the kubelet uses. + // Currently unix socket is supported on Linux, and tcp is supported on Windows. + // Examples: 'unix:///var/run/dockershim.sock', 'tcp://localhost:3735' + DockerShimSocket string `json:"dockerShimSocket"` + // DockershimRootDirectory is the dockershim root directory. + DockershimRootDirectory string `json:"dockerShimRootDirectory"` +} + +type DockerExecHandlerType string + +const ( + // DockerExecHandlerNative uses Docker's exec API for executing commands in containers. + DockerExecHandlerNative DockerExecHandlerType = "native" + // DockerExecHandlerNsenter uses nsenter for executing commands in containers. + DockerExecHandlerNsenter DockerExecHandlerType = "nsenter" + + // ControllersDisabled indicates no controllers should be enabled. + ControllersDisabled = "none" + // ControllersAll indicates all controllers should be started.
+ ControllersAll = "*" +) + +// FeatureList contains a set of features +type FeatureList []string + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MasterConfig holds the necessary configuration options for the OpenShift master +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type MasterConfig struct { + metav1.TypeMeta `json:",inline"` + + // ServingInfo describes how to start serving + ServingInfo HTTPServingInfo `json:"servingInfo"` + + // AuthConfig configures authentication options in addition to the standard + // oauth token and client certificate authenticators + AuthConfig MasterAuthConfig `json:"authConfig"` + + // AggregatorConfig has options for configuring the aggregator component of the API server. + AggregatorConfig AggregatorConfig `json:"aggregatorConfig"` + + // CORSAllowedOrigins is a list of origins that are allowed to make cross-origin requests to the API server + CORSAllowedOrigins []string `json:"corsAllowedOrigins"` + + // APILevels is a list of API levels that should be enabled on startup, for example: v1 + APILevels []string `json:"apiLevels"` + + // MasterPublicURL is how clients can access the OpenShift API server + MasterPublicURL string `json:"masterPublicURL"` + + // Controllers is a list of the controllers that should be started. If set to "none", no controllers + // will start automatically. The default value is "*" which will start all controllers. When + // using "*", you may exclude controllers by prepending a "-" in front of their name. No other + // values are recognized at this time. + Controllers string `json:"controllers"` + + // AdmissionConfig contains admission control plugin configuration. + AdmissionConfig AdmissionConfig `json:"admissionConfig"` + + // ControllerConfig holds configuration values for controllers + ControllerConfig ControllerConfig `json:"controllerConfig"` + + // EtcdStorageConfig contains information about how API resources are + // stored in Etcd. These values are only relevant when etcd is the + // backing store for the cluster.
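+ // + // For example (hypothetical values), the corresponding YAML stanza: + // + //	etcdStorageConfig: + //	  kubernetesStorageVersion: v1 + //	  kubernetesStoragePrefix: kubernetes.io + //	  openShiftStorageVersion: v1 + //	  openShiftStoragePrefix: openshift.io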
+ EtcdStorageConfig EtcdStorageConfig `json:"etcdStorageConfig"` + + // EtcdClientInfo contains information about how to connect to etcd + EtcdClientInfo EtcdConnectionInfo `json:"etcdClientInfo"` + // KubeletClientInfo contains information about how to connect to kubelets + KubeletClientInfo KubeletConnectionInfo `json:"kubeletClientInfo"` + + // KubernetesMasterConfig, if present, starts the Kubernetes master in this process + KubernetesMasterConfig KubernetesMasterConfig `json:"kubernetesMasterConfig"` + // EtcdConfig, if present, starts etcd in this process + EtcdConfig *EtcdConfig `json:"etcdConfig"` + // OAuthConfig, if present, starts the /oauth endpoint in this process + OAuthConfig *OAuthConfig `json:"oauthConfig"` + + // DNSConfig, if present, starts the DNS server in this process + DNSConfig *DNSConfig `json:"dnsConfig"` + + // ServiceAccountConfig holds options related to service accounts + ServiceAccountConfig ServiceAccountConfig `json:"serviceAccountConfig"` + + // MasterClients holds all the client connection information for controllers and other system components + MasterClients MasterClients `json:"masterClients"` + + // ImageConfig holds options that describe how to build image names for system components + ImageConfig ImageConfig `json:"imageConfig"` + + // ImagePolicyConfig controls limits and behavior for importing images + ImagePolicyConfig ImagePolicyConfig `json:"imagePolicyConfig"` + + // PolicyConfig holds information about where to locate critical pieces of bootstrapping policy + PolicyConfig PolicyConfig `json:"policyConfig"` + + // ProjectConfig holds information about project creation and defaults + ProjectConfig ProjectConfig `json:"projectConfig"` + + // RoutingConfig holds information about routing and route generation + RoutingConfig RoutingConfig `json:"routingConfig"` + + // NetworkConfig to be passed to the compiled in network plugin + NetworkConfig MasterNetworkConfig `json:"networkConfig"` + + // MasterVolumeConfig contains options for configuring volume plugins in the master node. + VolumeConfig MasterVolumeConfig `json:"volumeConfig"` + + // JenkinsPipelineConfig holds information about the default Jenkins template + // used for JenkinsPipeline build strategy. + JenkinsPipelineConfig JenkinsPipelineConfig `json:"jenkinsPipelineConfig"` + + // AuditConfig holds information related to auditing capabilities. + AuditConfig AuditConfig `json:"auditConfig"` + + // DisableOpenAPI avoids starting the openapi endpoint because it is very expensive. + // This option will be removed at a later time. It is never serialized. + DisableOpenAPI bool `json:"-"` +} + +// MasterAuthConfig configures authentication options in addition to the standard +// oauth token and client certificate authenticators +type MasterAuthConfig struct { + // RequestHeader holds options for setting up a front proxy against the API. It is optional. + RequestHeader *RequestHeaderAuthenticationOptions `json:"requestHeader"` + // WebhookTokenAuthnConfig, if present, configures remote token reviewers + WebhookTokenAuthenticators []WebhookTokenAuthenticator `json:"webhookTokenAuthenticators"` + // OAuthMetadataFile is a path to a file containing the discovery endpoint for OAuth 2.0 Authorization + // Server Metadata for an external OAuth server.
+ // See IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 + // This option is mutually exclusive with OAuthConfig + OAuthMetadataFile string `json:"oauthMetadataFile"` +} + +// RequestHeaderAuthenticationOptions provides options for setting up a front proxy against the entire +// API instead of against the /oauth endpoint. +type RequestHeaderAuthenticationOptions struct { + // ClientCA is a file with the trusted signer certs. It is required. + ClientCA string `json:"clientCA"` + // ClientCommonNames is a required list of common names to require a match from. + ClientCommonNames []string `json:"clientCommonNames"` + + // UsernameHeaders is the list of headers to check for user information. First hit wins. + UsernameHeaders []string `json:"usernameHeaders"` + // GroupHeaders is the set of headers to check for group information. All are unioned. + GroupHeaders []string `json:"groupHeaders"` + // ExtraHeaderPrefixes is the set of request header prefixes to inspect for user extra. X-Remote-Extra- is suggested. + ExtraHeaderPrefixes []string `json:"extraHeaderPrefixes"` +} + +// AggregatorConfig holds information required to make the aggregator function. +type AggregatorConfig struct { + // ProxyClientInfo specifies the client cert/key to use when proxying to aggregated API servers + ProxyClientInfo CertInfo `json:"proxyClientInfo"` +} + +type LogFormatType string + +type WebHookModeType string + +const ( + // LogFormatLegacy saves events in a 1-line text format. + LogFormatLegacy LogFormatType = "legacy" + // LogFormatJson saves events in structured json format. + LogFormatJson LogFormatType = "json" + + // WebHookModeBatch indicates that the webhook should buffer audit events + // internally, sending batch updates either once a certain number of + // events have been received or a certain amount of time has passed. + WebHookModeBatch WebHookModeType = "batch" + // WebHookModeBlocking causes the webhook to block on every attempt to process + // a set of events. This causes requests to the API server to wait for a + // round trip to the external audit service before sending a response. + WebHookModeBlocking WebHookModeType = "blocking" +) + +// AuditConfig holds configuration for the audit capabilities +type AuditConfig struct { + // If this flag is set, the audit log will be printed in the logs. + // The log contains the method, user, and requested URL. + Enabled bool `json:"enabled"` + // All requests coming to the apiserver will be logged to this file. + AuditFilePath string `json:"auditFilePath"` + // Maximum number of days to retain old log files based on the timestamp encoded in their filename. + MaximumFileRetentionDays int `json:"maximumFileRetentionDays"` + // Maximum number of old log files to retain. + MaximumRetainedFiles int `json:"maximumRetainedFiles"` + // Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB. + MaximumFileSizeMegabytes int `json:"maximumFileSizeMegabytes"` + + // PolicyFile is a path to the file that defines the audit policy configuration. + PolicyFile string `json:"policyFile"` + // PolicyConfiguration is an embedded policy configuration object to be used + // as the audit policy configuration. If present, it will be used instead of + // the path to the policy file. + PolicyConfiguration runtime.RawExtension `json:"policyConfiguration"` + + // Format of saved audits (legacy or json).
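+ // + // An illustrative audit stanza (paths and limits are hypothetical): + // + //	auditConfig: + //	  enabled: true + //	  auditFilePath: /var/log/openshift/audit.log + //	  maximumFileRetentionDays: 10 + //	  logFormat: json + //	  webHookMode: batch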
+ LogFormat LogFormatType `json:"logFormat"` + + // Path to a .kubeconfig formatted file that defines the audit webhook configuration. + WebHookKubeConfig string `json:"webHookKubeConfig"` + // Strategy for sending audit events (blocking or batch). + WebHookMode WebHookModeType `json:"webHookMode"` +} + +// JenkinsPipelineConfig holds configuration for the Jenkins pipeline strategy +type JenkinsPipelineConfig struct { + // AutoProvisionEnabled determines whether a Jenkins server will be spawned from the provided + // template when the first build config in the project with type JenkinsPipeline + // is created. When not specified, this option defaults to true. + AutoProvisionEnabled *bool `json:"autoProvisionEnabled"` + // TemplateNamespace contains the namespace name where the Jenkins template is stored + TemplateNamespace string `json:"templateNamespace"` + // TemplateName is the name of the default Jenkins template + TemplateName string `json:"templateName"` + // ServiceName is the name of the Jenkins service OpenShift uses to detect + // whether a Jenkins pipeline handler has already been installed in a project. + // This value *must* match a service name in the provided template. + ServiceName string `json:"serviceName"` + // Parameters specifies a set of optional parameters to the Jenkins template. + Parameters map[string]string `json:"parameters"` +} + +// ImagePolicyConfig holds the necessary configuration options for limits and behavior when importing images +type ImagePolicyConfig struct { + // MaxImagesBulkImportedPerRepository controls the number of images that are imported when a user + // does a bulk import of a container repository. This number defaults to 50 to prevent users from + // importing large numbers of images accidentally. Set -1 for no limit. + MaxImagesBulkImportedPerRepository int `json:"maxImagesBulkImportedPerRepository"` + // DisableScheduledImport allows scheduled background import of images to be disabled. + DisableScheduledImport bool `json:"disableScheduledImport"` + // ScheduledImageImportMinimumIntervalSeconds is the minimum number of seconds that can elapse between when image streams + // scheduled for background import are checked against the upstream repository. The default value is 15 minutes. + ScheduledImageImportMinimumIntervalSeconds int `json:"scheduledImageImportMinimumIntervalSeconds"` + // MaxScheduledImageImportsPerMinute is the maximum number of scheduled image streams that will be imported in the + // background per minute. The default value is 60. Set to -1 for unlimited. + MaxScheduledImageImportsPerMinute int `json:"maxScheduledImageImportsPerMinute"` + // AllowedRegistriesForImport limits the container image registries that normal users may import + // images from. Set this list to the registries that you trust to contain valid Docker + // images and that you want applications to be able to import from. Users with + // permission to create Images or ImageStreamMappings via the API are not affected by + // this policy - typically only administrators or system integrations will have those + // permissions. + AllowedRegistriesForImport *AllowedRegistries `json:"allowedRegistriesForImport,omitempty"` + // InternalRegistryHostname sets the hostname for the default internal image + // registry. The value must be in "hostname[:port]" format. + // For backward compatibility, users can still use the OPENSHIFT_DEFAULT_REGISTRY + // environment variable, but this setting overrides the environment variable.
+ InternalRegistryHostname string `json:"internalRegistryHostname,omitempty"` + // ExternalRegistryHostname sets the hostname for the default external image + // registry. The external hostname should be set only when the image registry + // is exposed externally. The value is used in 'publicDockerImageRepository' + // field in ImageStreams. The value must be in "hostname[:port]" format. + ExternalRegistryHostname string `json:"externalRegistryHostname,omitempty"` + // AdditionalTrustedCA is a path to a PEM bundle file containing additional CAs that + // should be trusted during imagestream import. + AdditionalTrustedCA string `json:"additionalTrustedCA,omitempty"` +} + +// AllowedRegistries represents a list of registries allowed for the image import. +type AllowedRegistries []RegistryLocation + +// RegistryLocation contains a location of the registry specified by the registry domain +// name. The domain name might include wildcards, like '*' or '??'. +type RegistryLocation struct { + // DomainName specifies a domain name for the registry + // If the registry uses a non-standard port (i.e. not 80 or 443), the port should be included + // in the domain name as well. + DomainName string `json:"domainName"` + // Insecure indicates whether the registry is secure (https) or insecure (http) + // By default (if not specified) the registry is assumed to be secure. + Insecure bool `json:"insecure,omitempty"` +} + +// ProjectConfig holds the necessary configuration options for project creation and defaults +type ProjectConfig struct { + // DefaultNodeSelector holds default project node label selector + DefaultNodeSelector string `json:"defaultNodeSelector"` + + // ProjectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint + ProjectRequestMessage string `json:"projectRequestMessage"` + + // ProjectRequestTemplate is the template to use for creating projects in response to projectrequest. + // It is in the format namespace/template and it is optional. + // If it is not specified, a default template is used. + ProjectRequestTemplate string `json:"projectRequestTemplate"` + + // SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled. + SecurityAllocator *SecurityAllocator `json:"securityAllocator"` +} + +// SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled. +type SecurityAllocator struct { + // UIDAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the + // block each namespace gets. For example, 1000-1999/10 will allocate ten UIDs per namespace, and will be able to allocate up to 100 blocks + // before running out of space. The default is to allocate from 1 billion to 2 billion in 10k blocks (which is the expected size of the + // ranges container images will use once user namespaces are started). + UIDAllocatorRange string `json:"uidAllocatorRange"` + // MCSAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is + // "<prefix>/<numLabels>[,<maxCategory>]". The default is "s0/2" and will allocate from c0 -> c1023, which means a total of 535k labels + // are available (1024 choose 2 ~ 535k). If this value is changed after startup, new projects may receive labels that are already allocated + // to other projects.
Prefix may be any valid SELinux set of terms (including user, role, and type), although leaving them as the default + // will allow the server to set them automatically. + // + // Examples: + // * s0:/2 - Allocate labels from s0:c0,c0 to s0:c511,c511 + // * s0:/2,512 - Allocate labels from s0:c0,c0,c0 to s0:c511,c511,511 + // + MCSAllocatorRange string `json:"mcsAllocatorRange"` + // MCSLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS + // ranges (100k namespaces, 535k/5 labels). + MCSLabelsPerProject int `json:"mcsLabelsPerProject"` +} + +// PolicyConfig holds information about where to locate critical pieces of bootstrapping policy +type PolicyConfig struct { + // UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS! + UserAgentMatchingConfig UserAgentMatchingConfig `json:"userAgentMatchingConfig"` +} + +// UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS! +type UserAgentMatchingConfig struct { + // If this list is non-empty, then a User-Agent must match one of the UserAgentRegexes to be allowed + RequiredClients []UserAgentMatchRule `json:"requiredClients"` + + // If this list is non-empty, then a User-Agent must not match any of the UserAgentRegexes + DeniedClients []UserAgentDenyRule `json:"deniedClients"` + + // DefaultRejectionMessage is the message shown when rejecting a client. If it is not set, a generic message is given. + DefaultRejectionMessage string `json:"defaultRejectionMessage"` +} + +// UserAgentMatchRule describes how to match a given request based on User-Agent and HTTPVerb +type UserAgentMatchRule struct { + // Regex is a regex that is checked against the User-Agent. + // Known variants of oc clients + // 1. oc accessing kube resources: oc/v1.2.0 (linux/amd64) kubernetes/bc4550d + // 2. oc accessing openshift resources: oc/v1.1.3 (linux/amd64) openshift/b348c2f + // 3. openshift kubectl accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d + // 4. openshift kubectl accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f + // 5. oadm accessing kube resources: oadm/v1.2.0 (linux/amd64) kubernetes/bc4550d + // 6. oadm accessing openshift resources: oadm/v1.1.3 (linux/amd64) openshift/b348c2f + // 7. openshift cli accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d + // 8. openshift cli accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f + Regex string `json:"regex"` + + // HTTPVerbs specifies which HTTP verbs should be matched. An empty list means "match all verbs". + HTTPVerbs []string `json:"httpVerbs"` +} + +// UserAgentDenyRule adds a rejection message that can be used to help a user figure out how to get an approved client +type UserAgentDenyRule struct { + UserAgentMatchRule `json:",inline"` + + // RejectionMessage is the message shown when rejecting a client. If it is not set, the default message is used. + RejectionMessage string `json:"rejectionMessage"` +} + +// RoutingConfig holds the necessary configuration options for routing to subdomains +type RoutingConfig struct { + // Subdomain is the suffix appended to $service.$namespace. to form the default route hostname + // DEPRECATED: This field is being replaced by routers setting their own defaults. This is the + // "default" route.
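+ // + // For example (hypothetical domain), subdomain: apps.mycompany.com gives a service "frontend" + // in namespace "web" the default route hostname frontend.web.apps.mycompany.com.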
+ Subdomain string `json:"subdomain"` +} + +// MasterNetworkConfig to be passed to the compiled in network plugin +type MasterNetworkConfig struct { + // NetworkPluginName is the name of the network plugin to use + NetworkPluginName string `json:"networkPluginName"` + // ClusterNetworkCIDR is the CIDR string to specify the global overlay network's L3 space. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead. + DeprecatedClusterNetworkCIDR string `json:"clusterNetworkCIDR,omitempty"` + // ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from. If this is specified, then ClusterNetworkCIDR and HostSubnetLength may not be set. + ClusterNetworks []ClusterNetworkEntry `json:"clusterNetworks"` + // HostSubnetLength is the number of bits to allocate to each host's subnet e.g. 8 would mean a /24 network on the host. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead. + DeprecatedHostSubnetLength uint32 `json:"hostSubnetLength,omitempty"` + // ServiceNetworkCIDR is the CIDR string to specify the service networks + ServiceNetworkCIDR string `json:"serviceNetworkCIDR"` + // ExternalIPNetworkCIDRs controls what values are acceptable for the service external IP field. If empty, no externalIP + // may be set. It may contain a list of CIDRs which are checked for access. If a CIDR is prefixed with !, IPs in that + // CIDR will be rejected. Rejections will be applied first, then the IP is checked against one of the allowed CIDRs. You + // should ensure this range does not overlap with your nodes, pods, or service CIDRs for security reasons. + ExternalIPNetworkCIDRs []string `json:"externalIPNetworkCIDRs"` + // IngressIPNetworkCIDR controls the range to assign ingress ips from for services of type LoadBalancer on bare + // metal. If empty, ingress ips will not be assigned. It may contain a single CIDR that will be allocated from. + // For security reasons, you should ensure that this range does not overlap with the CIDRs reserved for external ips, + // nodes, pods, or services. + IngressIPNetworkCIDR string `json:"ingressIPNetworkCIDR"` + // VXLANPort is the VXLAN port used by the cluster. If it is not set, 4789 is the default value + VXLANPort uint32 `json:"vxlanPort,omitempty"` +} + +// ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips. +type ClusterNetworkEntry struct { + // CIDR defines the total range of a cluster network's address space. + CIDR string `json:"cidr"` + // HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. e.g., 8 would mean that each node would have a /24 slice of the overlay network for its pods.
+ HostSubnetLength uint32 `json:"hostSubnetLength"` +} + +// ImageConfig holds the necessary configuration options for building image names for system components +type ImageConfig struct { + // Format is the format of the name to be built for the system component + Format string `json:"format"` + // Latest determines if the latest tag will be pulled from the registry + Latest bool `json:"latest"` +} + +// RemoteConnectionInfo holds information necessary for establishing a remote connection +type RemoteConnectionInfo struct { + // URL is the remote URL to connect to + URL string `json:"url"` + // CA is the CA for verifying TLS connections + CA string `json:"ca"` + // CertInfo is the TLS client cert information to present + // this is anonymous so that we can inline it for serialization + CertInfo `json:",inline"` +} + +// KubeletConnectionInfo holds information necessary for connecting to a kubelet +type KubeletConnectionInfo struct { + // Port is the port to connect to kubelets on + Port uint `json:"port"` + // CA is the CA for verifying TLS connections to kubelets + CA string `json:"ca"` + // CertInfo is the TLS client cert information for securing communication to kubelets + // this is anonymous so that we can inline it for serialization + CertInfo `json:",inline"` +} + +// EtcdConnectionInfo holds information necessary for connecting to an etcd server +type EtcdConnectionInfo struct { + // URLs are the URLs for etcd + URLs []string `json:"urls"` + // CA is a file containing trusted roots for the etcd server certificates + CA string `json:"ca"` + // CertInfo is the TLS client cert information for securing communication to etcd + // this is anonymous so that we can inline it for serialization + CertInfo `json:",inline"` +} + +// EtcdStorageConfig holds the necessary configuration options for the etcd storage underlying OpenShift and Kubernetes +type EtcdStorageConfig struct { + // KubernetesStorageVersion is the API version that Kube resources in etcd should be + // serialized to. This value should *not* be advanced until all clients in the + // cluster that read from etcd have code that allows them to read the new version. + KubernetesStorageVersion string `json:"kubernetesStorageVersion"` + // KubernetesStoragePrefix is the path within etcd that the Kubernetes resources will + // be rooted under. This value, if changed, will mean existing objects in etcd will + // no longer be located. The default value is 'kubernetes.io'. + KubernetesStoragePrefix string `json:"kubernetesStoragePrefix"` + // OpenShiftStorageVersion is the API version that OS resources in etcd should be + // serialized to. This value should *not* be advanced until all clients in the + // cluster that read from etcd have code that allows them to read the new version. + OpenShiftStorageVersion string `json:"openShiftStorageVersion"` + // OpenShiftStoragePrefix is the path within etcd that the OpenShift resources will + // be rooted under. This value, if changed, will mean existing objects in etcd will + // no longer be located. The default value is 'openshift.io'. + OpenShiftStoragePrefix string `json:"openShiftStoragePrefix"` +} + +// ServingInfo holds information about serving web pages +type ServingInfo struct { + // BindAddress is the ip:port to serve on + BindAddress string `json:"bindAddress"` + // BindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp", + // "tcp4", and "tcp6" + BindNetwork string `json:"bindNetwork"` + // CertInfo is the TLS cert info for serving secure traffic. 
+ // this is anonymous so that we can inline it for serialization + CertInfo `json:",inline"` + // ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates + ClientCA string `json:"clientCA"` + // NamedCertificates is a list of certificates to use to secure requests to specific hostnames + NamedCertificates []NamedCertificate `json:"namedCertificates"` + // MinTLSVersion is the minimum TLS version supported. + // Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants + MinTLSVersion string `json:"minTLSVersion,omitempty"` + // CipherSuites contains an overridden list of ciphers for the server to support. + // Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants + CipherSuites []string `json:"cipherSuites,omitempty"` +} + +// NamedCertificate specifies a certificate/key, and the names it should be served for +type NamedCertificate struct { + // Names is a list of DNS names this certificate should be used to secure + // A name can be a normal DNS name, or can contain leading wildcard segments. + Names []string `json:"names"` + // CertInfo is the TLS cert info for serving secure traffic + CertInfo `json:",inline"` +} + +// HTTPServingInfo holds configuration for serving HTTP +type HTTPServingInfo struct { + // ServingInfo is the HTTP serving information + ServingInfo `json:",inline"` + // MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit. + MaxRequestsInFlight int `json:"maxRequestsInFlight"` + // RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes; if + // -1, there is no limit on requests. + RequestTimeoutSeconds int `json:"requestTimeoutSeconds"` +} + +// MasterClients holds references to `.kubeconfig` files that qualify master clients for OpenShift and Kubernetes +type MasterClients struct { + // OpenShiftLoopbackKubeConfig is a .kubeconfig filename for system components to loopback to this master + OpenShiftLoopbackKubeConfig string `json:"openshiftLoopbackKubeConfig"` + + // OpenShiftLoopbackClientConnectionOverrides specifies client overrides for system components to loop back to this master. + OpenShiftLoopbackClientConnectionOverrides *ClientConnectionOverrides `json:"openshiftLoopbackClientConnectionOverrides"` +} + +// ClientConnectionOverrides are a set of overrides to the default client connection settings. +type ClientConnectionOverrides struct { + // AcceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the + // default value of 'application/json'. This field will control all connections to the server used by a particular + // client. + AcceptContentTypes string `json:"acceptContentTypes"` + // ContentType is the content type used when sending data to the server from this client. + ContentType string `json:"contentType"` + + // QPS controls the number of queries per second allowed for this connection. + QPS float32 `json:"qps"` + // Burst allows extra queries to accumulate when a client is exceeding its rate.
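+ // + // An illustrative overrides stanza (values are hypothetical): + // + //	masterClientConnectionOverrides: + //	  acceptContentTypes: application/json + //	  contentType: application/json + //	  qps: 10 + //	  burst: 20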
+ Burst int32 `json:"burst"` +} + +// DNSConfig holds the necessary configuration options for DNS +type DNSConfig struct { + // BindAddress is the ip:port to serve DNS on + BindAddress string `json:"bindAddress"` + // BindNetwork is the type of network to bind to - defaults to "tcp4", accepts "tcp", + // "tcp4", and "tcp6" + BindNetwork string `json:"bindNetwork"` + // AllowRecursiveQueries allows the DNS server on the master to answer queries recursively. Note that open + // resolvers can be used for DNS amplification attacks and the master DNS should not be made accessible + // to public networks. + AllowRecursiveQueries bool `json:"allowRecursiveQueries"` +} + +// WebhookTokenAuthenticators holds the necessary configuration options for +// external token authenticators +type WebhookTokenAuthenticator struct { + // ConfigFile is a path to a Kubeconfig file with the webhook configuration + ConfigFile string `json:"configFile"` + // CacheTTL indicates how long an authentication result should be cached. + // It takes a valid time duration string (e.g. "5m"). + // If empty, you get a default timeout of 2 minutes. + // If zero (e.g. "0m"), caching is disabled + CacheTTL string `json:"cacheTTL"` +} + +// OAuthConfig holds the necessary configuration options for OAuth authentication +type OAuthConfig struct { + // MasterCA is the CA for verifying the TLS connection back to the MasterURL. + MasterCA *string `json:"masterCA"` + + // MasterURL is used for making server-to-server calls to exchange authorization codes for access tokens + MasterURL string `json:"masterURL"` + + // MasterPublicURL is used for building valid client redirect URLs for internal and external access + MasterPublicURL string `json:"masterPublicURL"` + + // AssetPublicURL is used for building valid client redirect URLs for external access + AssetPublicURL string `json:"assetPublicURL"` + + // AlwaysShowProviderSelection will force the provider selection page to render even when there is only a single provider. + AlwaysShowProviderSelection bool `json:"alwaysShowProviderSelection"` + + // IdentityProviders is an ordered list of ways for a user to identify themselves + IdentityProviders []IdentityProvider `json:"identityProviders"` + + // GrantConfig describes how to handle grants + GrantConfig GrantConfig `json:"grantConfig"` + + // SessionConfig holds information about configuring sessions. + SessionConfig *SessionConfig `json:"sessionConfig"` + + // TokenConfig contains options for authorization and access tokens + TokenConfig TokenConfig `json:"tokenConfig"` + + // Templates allow you to customize pages like the login page. + Templates *OAuthTemplates `json:"templates"` +} + +// OAuthTemplates allow for customization of pages like the login page +type OAuthTemplates struct { + // Login is a path to a file containing a go template used to render the login page. + // If unspecified, the default login page is used. + Login string `json:"login"` + + // ProviderSelection is a path to a file containing a go template used to render the provider selection page. + // If unspecified, the default provider selection page is used. + ProviderSelection string `json:"providerSelection"` + + // Error is a path to a file containing a go template used to render error pages during the authentication or grant flow + // If unspecified, the default error page is used.
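+ // + // An illustrative templates stanza (paths are hypothetical): + // + //	templates: + //	  login: /path/to/login-template.html + //	  providerSelection: /path/to/provider-selection.html + //	  error: /path/to/error-template.html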
+ Error string `json:"error"` +} + +// ServiceAccountConfig holds the necessary configuration options for a service account +type ServiceAccountConfig struct { + // ManagedNames is a list of service account names that will be auto-created in every namespace. + // If no names are specified, the ServiceAccountsController will not be started. + ManagedNames []string `json:"managedNames"` + + // LimitSecretReferences controls whether or not to allow a service account to reference any secret in a namespace + // without explicitly referencing it + LimitSecretReferences bool `json:"limitSecretReferences"` + + // PrivateKeyFile is a file containing a PEM-encoded private RSA key, used to sign service account tokens. + // If no private key is specified, the service account TokensController will not be started. + PrivateKeyFile string `json:"privateKeyFile"` + + // PublicKeyFiles is a list of files, each containing a PEM-encoded public RSA key. + // (If any file contains a private key, the public portion of the key is used) + // The list of public keys is used to verify presented service account tokens. + // Each key is tried in order until the list is exhausted or verification succeeds. + // If no keys are specified, no service account authentication will be available. + PublicKeyFiles []string `json:"publicKeyFiles"` + + // MasterCA is the CA for verifying the TLS connection back to the master. The service account controller will automatically + // inject the contents of this file into pods so they can verify connections to the master. + MasterCA string `json:"masterCA"` +} + +// TokenConfig holds the necessary configuration options for authorization and access tokens +type TokenConfig struct { + // AuthorizeTokenMaxAgeSeconds defines the maximum age of authorize tokens + AuthorizeTokenMaxAgeSeconds int32 `json:"authorizeTokenMaxAgeSeconds"` + // AccessTokenMaxAgeSeconds defines the maximum age of access tokens + AccessTokenMaxAgeSeconds int32 `json:"accessTokenMaxAgeSeconds"` + // AccessTokenInactivityTimeoutSeconds defines the default token + // inactivity timeout for tokens granted by any client. + // Setting it to nil means the feature is completely disabled (default). + // The default setting can be overridden on a per-OAuthClient basis. + // The value represents the maximum amount of time that can occur between + // consecutive uses of the token. Tokens become invalid if they are not + // used within this temporal window. The user will need to acquire a new + // token to regain access once a token times out. + // Valid values are: + // - 0: Tokens never time out + // - X: Tokens time out if there is no activity for X seconds + // The current minimum allowed value for X is 300 (5 minutes) + AccessTokenInactivityTimeoutSeconds *int32 `json:"accessTokenInactivityTimeoutSeconds,omitempty"` +} + +// SessionConfig specifies options for cookie-based sessions. Used by AuthRequestHandlerSession +type SessionConfig struct { + // SessionSecretsFile is a reference to a file containing a serialized SessionSecrets object + // If no file is specified, a random signing and encryption key are generated at each server start + SessionSecretsFile string `json:"sessionSecretsFile"` + // SessionMaxAgeSeconds specifies how long created sessions last.
Used by AuthRequestHandlerSession + SessionMaxAgeSeconds int32 `json:"sessionMaxAgeSeconds"` + // SessionName is the cookie name used to store the session + SessionName string `json:"sessionName"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SessionSecrets lists the secrets to use to sign/encrypt and authenticate/decrypt created sessions. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type SessionSecrets struct { + metav1.TypeMeta `json:",inline"` + + // Secrets is a list of secrets + // New sessions are signed and encrypted using the first secret. + // Existing sessions are decrypted/authenticated by each secret until one succeeds. This allows rotating secrets. + Secrets []SessionSecret `json:"secrets"` +} + +// SessionSecret is a secret used to authenticate/decrypt cookie-based sessions +type SessionSecret struct { + // Authentication is used to authenticate sessions using HMAC. It is recommended to use a secret with 32 or 64 bytes. + Authentication string `json:"authentication"` + // Encryption is used to encrypt sessions. Must be 16, 24, or 32 characters long, to select AES-128, AES-192, or AES-256. + Encryption string `json:"encryption"` +} + +// IdentityProvider provides identities for users authenticating using credentials +type IdentityProvider struct { + // Name is used to qualify the identities returned by this provider + Name string `json:"name"` + // UseAsChallenger indicates whether to issue WWW-Authenticate challenges for this provider + UseAsChallenger bool `json:"challenge"` + // UseAsLogin indicates whether to use this identity provider for unauthenticated browsers to log in against + UseAsLogin bool `json:"login"` + // MappingMethod determines how identities from this provider are mapped to users + MappingMethod string `json:"mappingMethod"` + // Provider contains the information about how to set up a specific identity provider + Provider runtime.RawExtension `json:"provider"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BasicAuthPasswordIdentityProvider provides identities for users authenticating using HTTP basic auth credentials +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type BasicAuthPasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // RemoteConnectionInfo contains information about how to connect to the external basic auth server + RemoteConnectionInfo `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AllowAllPasswordIdentityProvider provides identities for users authenticating using non-empty passwords +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type AllowAllPasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DenyAllPasswordIdentityProvider provides no identities for users +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type DenyAllPasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HTPasswdPasswordIdentityProvider provides identities for users authenticating using htpasswd credentials +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type HTPasswdPasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // File is a reference to your htpasswd file + File string `json:"file"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LDAPPasswordIdentityProvider provides identities for users authenticating using LDAP credentials +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type LDAPPasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + // URL is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is + // ldap://host:port/basedn?attribute?scope?filter + URL string `json:"url"` + // BindDN is an optional DN to bind with during the search phase. + BindDN string `json:"bindDN"` + // BindPassword is an optional password to bind with during the search phase. + BindPassword StringSource `json:"bindPassword"` + + // Insecure, if true, indicates the connection should not use TLS. + // Cannot be set to true with a URL scheme of "ldaps://" + // If false, "ldaps://" URLs connect using TLS, and "ldap://" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830 + Insecure bool `json:"insecure"` + // CA is the optional trusted certificate authority bundle to use when making requests to the server + // If empty, the default system roots are used + CA string `json:"ca"` + // Attributes maps LDAP attributes to identities + Attributes LDAPAttributeMapping `json:"attributes"` +} + +// LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields +type LDAPAttributeMapping struct { + // ID is the list of attributes whose values should be used as the user ID. Required. + // LDAP standard identity attribute is "dn" + ID []string `json:"id"` + // PreferredUsername is the list of attributes whose values should be used as the preferred username. + // LDAP standard login attribute is "uid" + PreferredUsername []string `json:"preferredUsername"` + // Name is the list of attributes whose values should be used as the display name. Optional. 
+ // If unspecified, no display name is set for the identity + // LDAP standard display name attribute is "cn" + Name []string `json:"name"` + // Email is the list of attributes whose values should be used as the email address. Optional. + // If unspecified, no email is set for the identity + Email []string `json:"email"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KeystonePasswordIdentityProvider provides identities for users authenticating using keystone password credentials +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type KeystonePasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + // RemoteConnectionInfo contains information about how to connect to the keystone server + RemoteConnectionInfo `json:",inline"` + // Domain Name is required for keystone v3 + DomainName string `json:"domainName"` + // UseKeystoneIdentity flag indicates that user should be authenticated by keystone ID, not by username + UseKeystoneIdentity bool `json:"useKeystoneIdentity"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type RequestHeaderIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // LoginURL is a URL to redirect unauthenticated /authorize requests to + // Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here + // ${url} is replaced with the current URL, escaped to be safe in a query parameter + // https://www.example.com/sso-login?then=${url} + // ${query} is replaced with the current query string + // https://www.example.com/auth-proxy/oauth/authorize?${query} + LoginURL string `json:"loginURL"` + + // ChallengeURL is a URL to redirect unauthenticated /authorize requests to + // Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here + // ${url} is replaced with the current URL, escaped to be safe in a query parameter + // https://www.example.com/sso-login?then=${url} + // ${query} is replaced with the current query string + // https://www.example.com/auth-proxy/oauth/authorize?${query} + ChallengeURL string `json:"challengeURL"` + + // ClientCA is a file with the trusted signer certs. If empty, no request verification is done, and any direct request to the OAuth server can impersonate any identity from this provider, merely by setting a request header. + ClientCA string `json:"clientCA"` + // ClientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative. 
+ ClientCommonNames []string `json:"clientCommonNames"` + + // Headers is the set of headers to check for identity information + Headers []string `json:"headers"` + // PreferredUsernameHeaders is the set of headers to check for the preferred username + PreferredUsernameHeaders []string `json:"preferredUsernameHeaders"` + // NameHeaders is the set of headers to check for the display name + NameHeaders []string `json:"nameHeaders"` + // EmailHeaders is the set of headers to check for the email address + EmailHeaders []string `json:"emailHeaders"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// GitHubIdentityProvider provides identities for users authenticating using GitHub credentials +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type GitHubIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // ClientID is the oauth client ID + ClientID string `json:"clientID"` + // ClientSecret is the oauth client secret + ClientSecret StringSource `json:"clientSecret"` + // Organizations optionally restricts which organizations are allowed to log in + Organizations []string `json:"organizations"` + // Teams optionally restricts which teams are allowed to log in. Format is <org>/<team>. + Teams []string `json:"teams"` + // Hostname is the optional domain (e.g. "mycompany.com") for use with a hosted instance of GitHub Enterprise. + // It must match the GitHub Enterprise settings value that is configured at /setup/settings#hostname. + Hostname string `json:"hostname"` + // CA is the optional trusted certificate authority bundle to use when making requests to the server. + // If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value. + CA string `json:"ca"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// GitLabIdentityProvider provides identities for users authenticating using GitLab credentials +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type GitLabIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // CA is the optional trusted certificate authority bundle to use when making requests to the server + // If empty, the default system roots are used + CA string `json:"ca"` + // URL is the oauth server base URL + URL string `json:"url"` + // ClientID is the oauth client ID + ClientID string `json:"clientID"` + // ClientSecret is the oauth client secret + ClientSecret StringSource `json:"clientSecret"` + // Legacy determines if OAuth2 or OIDC should be used + // If true, OAuth2 is used + // If false, OIDC is used + // If nil and the URL's host is gitlab.com, OIDC is used + // Otherwise, OAuth2 is used + // In a future release, nil will default to using OIDC + // Eventually this flag will be removed and only OIDC will be used + Legacy *bool `json:"legacy,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// GoogleIdentityProvider provides identities for users authenticating using Google credentials +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type GoogleIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // ClientID is the oauth client ID + ClientID string `json:"clientID"` + // ClientSecret is the oauth client secret + ClientSecret StringSource `json:"clientSecret"` + + // HostedDomain is the optional Google App domain (e.g. "mycompany.com") to restrict logins to + HostedDomain string `json:"hostedDomain"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type OpenIDIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // CA is the optional trusted certificate authority bundle to use when making requests to the server + // If empty, the default system roots are used + CA string `json:"ca"` + + // ClientID is the oauth client ID + ClientID string `json:"clientID"` + // ClientSecret is the oauth client secret + ClientSecret StringSource `json:"clientSecret"` + + // ExtraScopes are any scopes to request in addition to the standard "openid" scope. + ExtraScopes []string `json:"extraScopes"` + + // ExtraAuthorizeParameters are any custom parameters to add to the authorize request. + ExtraAuthorizeParameters map[string]string `json:"extraAuthorizeParameters"` + + // URLs to use to authenticate + URLs OpenIDURLs `json:"urls"` + + // Claims mappings + Claims OpenIDClaims `json:"claims"` +} + +// OpenIDURLs are URLs to use when authenticating with an OpenID identity provider +type OpenIDURLs struct { + // Authorize is the oauth authorization URL + Authorize string `json:"authorize"` + // Token is the oauth token granting URL + Token string `json:"token"` + // UserInfo is the optional userinfo URL. 
+ // If present, a granted access_token is used to request claims + // If empty, a granted id_token is parsed for claims + UserInfo string `json:"userInfo"` +} + +// OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider +type OpenIDClaims struct { + // ID is the list of claims whose values should be used as the user ID. Required. + // OpenID standard identity claim is "sub" + ID []string `json:"id"` + // PreferredUsername is the list of claims whose values should be used as the preferred username. + // If unspecified, the preferred username is determined from the value of the id claim + PreferredUsername []string `json:"preferredUsername"` + // Name is the list of claims whose values should be used as the display name. Optional. + // If unspecified, no display name is set for the identity + Name []string `json:"name"` + // Email is the list of claims whose values should be used as the email address. Optional. + // If unspecified, no email is set for the identity + Email []string `json:"email"` +} + +// GrantConfig holds the necessary configuration options for grant handlers +type GrantConfig struct { + // Method determines the default strategy to use when an OAuth client requests a grant. + // This method will be used only if the specific OAuth client doesn't provide a strategy + // of its own. Valid grant handling methods are: + // - auto: always approves grant requests, useful for trusted clients + // - prompt: prompts the end user for approval of grant requests, useful for third-party clients + // - deny: always denies grant requests, useful for black-listed clients + Method GrantHandlerType `json:"method"` + + // ServiceAccountMethod is used for determining client authorization for service account oauth clients. + // It must be either: deny, prompt + ServiceAccountMethod GrantHandlerType `json:"serviceAccountMethod"` +} + +type GrantHandlerType string + +const ( + // GrantHandlerAuto auto-approves client authorization grant requests + GrantHandlerAuto GrantHandlerType = "auto" + // GrantHandlerPrompt prompts the user to approve new client authorization grant requests + GrantHandlerPrompt GrantHandlerType = "prompt" + // GrantHandlerDeny auto-denies client authorization grant requests + GrantHandlerDeny GrantHandlerType = "deny" +) + +// EtcdConfig holds the necessary configuration options for connecting with an etcd database +type EtcdConfig struct { + // ServingInfo describes how to start serving the etcd master + ServingInfo ServingInfo `json:"servingInfo"` + // Address is the advertised host:port for client connections to etcd + Address string `json:"address"` + // PeerServingInfo describes how to start serving the etcd peer + PeerServingInfo ServingInfo `json:"peerServingInfo"` + // PeerAddress is the advertised host:port for peer connections to etcd + PeerAddress string `json:"peerAddress"` + + // StorageDir is the path to the etcd storage directory + StorageDir string `json:"storageDirectory"` +} + +// KubernetesMasterConfig holds the necessary configuration options for the Kubernetes master +type KubernetesMasterConfig struct { + // APILevels is a list of API levels that should be enabled on startup, for example: v1 + APILevels []string `json:"apiLevels"` + // DisabledAPIGroupVersions is a map of groups to the versions (or *) that should be disabled. + DisabledAPIGroupVersions map[string][]string `json:"disabledAPIGroupVersions"` + + // MasterIP is the public IP address of the Kubernetes master.
If empty, the first result from net.InterfaceAddrs will be used. + MasterIP string `json:"masterIP"` + // MasterEndpointReconcileTTL sets the time to live in seconds of an endpoint record recorded by each master. The endpoints are checked + // at an interval that is 2/3 of this value and this value defaults to 15s if unset. In very large clusters, this value may be increased to + // reduce the possibility that the master endpoint record expires (due to other load on the etcd server) and causes masters to drop in and + // out of the kubernetes service record. It is not recommended to set this value below 15s. + MasterEndpointReconcileTTL int `json:"masterEndpointReconcileTTL"` + // ServicesSubnet is the subnet to use for assigning service IPs + ServicesSubnet string `json:"servicesSubnet"` + // ServicesNodePortRange is the range to use for assigning service public ports on a host. + ServicesNodePortRange string `json:"servicesNodePortRange"` + + // SchedulerConfigFile points to a file that describes how to set up the scheduler. If empty, you get the default scheduling rules. + SchedulerConfigFile string `json:"schedulerConfigFile"` + + // PodEvictionTimeout controls the grace period for deleting pods on failed nodes. + // It takes a valid time duration string. If empty, you get the default pod eviction timeout. + PodEvictionTimeout string `json:"podEvictionTimeout"` + // ProxyClientInfo specifies the client cert/key to use when proxying to pods + ProxyClientInfo CertInfo `json:"proxyClientInfo"` + + // APIServerArguments are key value pairs that will be passed directly to the Kube apiserver that match the apiserver's + // command line arguments. These are not migrated, but if you reference a value that does not exist the server will not + // start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations. + APIServerArguments ExtendedArguments `json:"apiServerArguments"` + // ControllerArguments are key value pairs that will be passed directly to the Kube controller manager that match the + // controller manager's command line arguments. These are not migrated, but if you reference a value that does not exist + // the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid + // configurations. + ControllerArguments ExtendedArguments `json:"controllerArguments"` + // SchedulerArguments are key value pairs that will be passed directly to the Kube scheduler that match the scheduler's + // command line arguments. These are not migrated, but if you reference a value that does not exist the server will not + // start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.
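+ // As an illustrative example (hypothetical values), an entry of "v": ["4"]
+ // in any of these argument maps is passed through to the component as --v=4
+ // on its command line.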
+ SchedulerArguments ExtendedArguments `json:"schedulerArguments"` +} + +// CertInfo relates a certificate with a private key +type CertInfo struct { + // CertFile is a file containing a PEM-encoded certificate + CertFile string `json:"certFile"` + // KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile + KeyFile string `json:"keyFile"` +} + +// PodManifestConfig holds the necessary configuration options for using pod manifests +type PodManifestConfig struct { + // Path specifies the path for the pod manifest file or directory + // If it's a directory, it's expected to contain one or more manifest files + // This is used by the Kubelet to create pods on the node + Path string `json:"path"` + // FileCheckIntervalSeconds is the interval in seconds for checking the manifest file(s) for new data + // The interval needs to be a positive value + FileCheckIntervalSeconds int64 `json:"fileCheckIntervalSeconds"` +} + +// StringSource allows specifying a string inline, or externally via env var or file. +// When it contains only a string value, it marshals to a simple JSON string. +type StringSource struct { + // StringSourceSpec specifies the string value, or external location + StringSourceSpec `json:",inline"` +} + +// StringSourceSpec specifies a string value, or external location +type StringSourceSpec struct { + // Value specifies the cleartext value, or an encrypted value if keyFile is specified. + Value string `json:"value"` + + // Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified. + Env string `json:"env"` + + // File references a file containing the cleartext value, or an encrypted value if a keyFile is specified. + File string `json:"file"` + + // KeyFile references a file containing the key to use to decrypt the value. + KeyFile string `json:"keyFile"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LDAPSyncConfig holds the necessary configuration options to define an LDAP group sync +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type LDAPSyncConfig struct { + metav1.TypeMeta `json:",inline"` + // URL is the scheme, host and port of the LDAP server to connect to: + // scheme://host:port + URL string `json:"url"` + // BindDN is an optional DN to bind to the LDAP server with + BindDN string `json:"bindDN"` + // BindPassword is an optional password to bind with during the search phase. + BindPassword StringSource `json:"bindPassword"` + + // Insecure, if true, indicates the connection should not use TLS.
+ // Cannot be set to true with a URL scheme of "ldaps://" + // If false, "ldaps://" URLs connect using TLS, and "ldap://" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830 + Insecure bool `json:"insecure"` + // CA is the optional trusted certificate authority bundle to use when making requests to the server + // If empty, the default system roots are used + CA string `json:"ca"` + + // LDAPGroupUIDToOpenShiftGroupNameMapping is an optional direct mapping of LDAP group UIDs to + // OpenShift Group names + LDAPGroupUIDToOpenShiftGroupNameMapping map[string]string `json:"groupUIDNameMapping"` + + // RFC2307Config holds the configuration for extracting data from an LDAP server set up in a fashion + // similar to RFC2307: first-class group and user entries, with group membership determined by a + // multi-valued attribute on the group entry listing its members + RFC2307Config *RFC2307Config `json:"rfc2307,omitempty"` + + // ActiveDirectoryConfig holds the configuration for extracting data from an LDAP server set up in a + // fashion similar to that used in Active Directory: first-class user entries, with group membership + // determined by a multi-valued attribute on members listing groups they are a member of + ActiveDirectoryConfig *ActiveDirectoryConfig `json:"activeDirectory,omitempty"` + + // AugmentedActiveDirectoryConfig holds the configuration for extracting data from an LDAP server + // set up in a fashion similar to that used in Active Directory as described above, with one addition: + // first-class group entries exist and are used to hold metadata but not group membership + AugmentedActiveDirectoryConfig *AugmentedActiveDirectoryConfig `json:"augmentedActiveDirectory,omitempty"` +} + +// RFC2307Config holds the necessary configuration options to define how an LDAP group sync interacts with an LDAP +// server using the RFC2307 schema +type RFC2307Config struct { + // AllGroupsQuery holds the template for an LDAP query that returns group entries. + AllGroupsQuery LDAPQuery `json:"groupsQuery"` + + // GroupUIDAttribute defines which attribute on an LDAP group entry will be interpreted as its unique identifier. + // (ldapGroupUID) + GroupUIDAttribute string `json:"groupUIDAttribute"` + + // GroupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for + // an OpenShift group + GroupNameAttributes []string `json:"groupNameAttributes"` + + // GroupMembershipAttributes defines which attributes on an LDAP group entry will be interpreted as its members. + // The values contained in those attributes must be queryable by your UserUIDAttribute + GroupMembershipAttributes []string `json:"groupMembershipAttributes"` + + // AllUsersQuery holds the template for an LDAP query that returns user entries. + AllUsersQuery LDAPQuery `json:"usersQuery"` + + // UserUIDAttribute defines which attribute on an LDAP user entry will be interpreted as its unique identifier. + // It must correspond to values that will be found from the GroupMembershipAttributes + UserUIDAttribute string `json:"userUIDAttribute"` + + // UserNameAttributes defines which attributes on an LDAP user entry will be used, in order, as its OpenShift user name. + // The first attribute with a non-empty value is used.
This should match your PreferredUsername setting for your LDAPPasswordIdentityProvider + UserNameAttributes []string `json:"userNameAttributes"` + + // TolerateMemberNotFoundErrors determines the behavior of the LDAP sync job when missing user entries are + // encountered. If 'true', an LDAP query for users that doesn't find any will be tolerated and only + // an error will be logged. If 'false', the LDAP sync job will fail if a query for users doesn't find + // any. The default value is 'false'. Misconfigured LDAP sync jobs with this flag set to 'true' can cause + // group membership to be removed, so it is recommended to use this flag with caution. + TolerateMemberNotFoundErrors bool `json:"tolerateMemberNotFoundErrors"` + + // TolerateMemberOutOfScopeErrors determines the behavior of the LDAP sync job when out-of-scope user entries + // are encountered. If 'true', an LDAP query for a user that falls outside of the base DN given for the all + // user query will be tolerated and only an error will be logged. If 'false', the LDAP sync job will fail + // if a user query would search outside of the base DN specified by the all user query. Misconfigured LDAP + // sync jobs with this flag set to 'true' can result in groups missing users, so it is recommended to use + // this flag with caution. + TolerateMemberOutOfScopeErrors bool `json:"tolerateMemberOutOfScopeErrors"` +} + +// ActiveDirectoryConfig holds the necessary configuration options to define how an LDAP group sync interacts with an LDAP +// server using the Active Directory schema +type ActiveDirectoryConfig struct { + // AllUsersQuery holds the template for an LDAP query that returns user entries. + AllUsersQuery LDAPQuery `json:"usersQuery"` + + // UserNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name. + UserNameAttributes []string `json:"userNameAttributes"` + + // GroupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted + // as the groups it is a member of + GroupMembershipAttributes []string `json:"groupMembershipAttributes"` +} + +// AugmentedActiveDirectoryConfig holds the necessary configuration options to define how an LDAP group sync interacts with an LDAP +// server using the augmented Active Directory schema +type AugmentedActiveDirectoryConfig struct { + // AllUsersQuery holds the template for an LDAP query that returns user entries. + AllUsersQuery LDAPQuery `json:"usersQuery"` + + // UserNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name. + UserNameAttributes []string `json:"userNameAttributes"` + + // GroupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted + // as the groups it is a member of + GroupMembershipAttributes []string `json:"groupMembershipAttributes"` + + // AllGroupsQuery holds the template for an LDAP query that returns group entries. + AllGroupsQuery LDAPQuery `json:"groupsQuery"` + + // GroupUIDAttribute defines which attribute on an LDAP group entry will be interpreted as its unique identifier.
+ // (ldapGroupUID) + GroupUIDAttribute string `json:"groupUIDAttribute"` + + // GroupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for + // an OpenShift group + GroupNameAttributes []string `json:"groupNameAttributes"` +} + +// LDAPQuery holds the options necessary to build an LDAP query +type LDAPQuery struct { + // The DN of the branch of the directory where all searches should start from + BaseDN string `json:"baseDN"` + + // The (optional) scope of the search. Can be: + // base: only the base object, + // one: all objects on the base level, + // sub: the entire subtree + // Defaults to the entire subtree if not set + Scope string `json:"scope"` + + // The (optional) behavior of the search with regard to aliases. Can be: + // never: never dereference aliases, + // search: only dereference in searching, + // base: only dereference in finding the base object, + // always: always dereference + // Defaults to always dereferencing if not set + DerefAliases string `json:"derefAliases"` + + // TimeLimit holds the limit of time in seconds that any request to the server can remain outstanding + // before the wait for a response is given up. If this is 0, no client-side limit is imposed + TimeLimit int `json:"timeout"` + + // Filter is a valid LDAP search filter that retrieves all relevant entries from the LDAP server with the base DN + Filter string `json:"filter"` + + // PageSize is the maximum preferred page size, measured in LDAP entries. A page size of 0 means no paging will be done. + PageSize int `json:"pageSize"` +} + +// AdmissionPluginConfig holds the necessary configuration options for admission plugins +type AdmissionPluginConfig struct { + // Location is the path to a configuration file that contains the plugin's + // configuration + Location string `json:"location"` + + // Configuration is an embedded configuration object to be used as the plugin's + // configuration. If present, it will be used instead of the path to the configuration file. + Configuration runtime.RawExtension `json:"configuration"` +} + +// AdmissionConfig holds the necessary configuration options for admission +type AdmissionConfig struct { + // PluginConfig allows specifying a configuration file per admission control plugin + PluginConfig map[string]*AdmissionPluginConfig `json:"pluginConfig"` + + // PluginOrderOverride is a list of admission control plugin names that will be installed + // on the master. Order is significant. If empty, a default list of plugins is used. + PluginOrderOverride []string `json:"pluginOrderOverride,omitempty"` +} + +// ControllerConfig holds configuration values for controllers +type ControllerConfig struct { + // Controllers is a list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller + // named 'foo', '-foo' disables the controller named 'foo'. + // Defaults to "*". + Controllers []string `json:"controllers"` + // Election defines the configuration for electing a controller instance to make changes to + // the cluster. If unspecified, the ControllerTTL value is checked to determine whether the + // legacy direct etcd election code will be used. + Election *ControllerElectionConfig `json:"election"` + // ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for + // pods fulfilling a service to serve with.
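+ // If the nested Signer is nil, serving certificates are not signed
+ // automatically (see the ServiceServingCert type below).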
+ ServiceServingCert ServiceServingCert `json:"serviceServingCert"` +} + +// ControllerElectionConfig contains configuration values for deciding how a controller +// will be elected to act as leader. +type ControllerElectionConfig struct { + // LockName is the resource name used to act as the lock for determining which controller + // instance should lead. + LockName string `json:"lockName"` + // LockNamespace is the resource namespace used to act as the lock for determining which + // controller instance should lead. It defaults to "kube-system" + LockNamespace string `json:"lockNamespace"` + // LockResource is the group and resource name to use to coordinate for the controller lock. + // If unset, defaults to "configmaps". + LockResource GroupResource `json:"lockResource"` +} + +// GroupResource points to a resource by its name and API group. +type GroupResource struct { + // Group is the name of an API group + Group string `json:"group"` + // Resource is the name of a resource. + Resource string `json:"resource"` +} + +// ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for +// pods fulfilling a service to serve with. +type ServiceServingCert struct { + // Signer holds the signing information used to automatically sign serving certificates. + // If this value is nil, then certs are not signed automatically. + Signer *CertInfo `json:"signer"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DefaultAdmissionConfig can be used to enable or disable various admission plugins. +// When this type is present as the `configuration` object under `pluginConfig` and *if* the admission plugin supports it, +// this will cause an "off by default" admission plugin to be enabled +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type DefaultAdmissionConfig struct { + metav1.TypeMeta `json:",inline"` + + // Disable turns off an admission plugin that is enabled by default. + Disable bool `json:"disable"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BuildDefaultsConfig controls the default information for Builds +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type BuildDefaultsConfig struct { + metav1.TypeMeta `json:",inline"` + + // gitHTTPProxy is the location of the HTTPProxy for Git source + GitHTTPProxy string `json:"gitHTTPProxy,omitempty"` + + // gitHTTPSProxy is the location of the HTTPSProxy for Git source + GitHTTPSProxy string `json:"gitHTTPSProxy,omitempty"` + + // gitNoProxy is the list of domains for which the proxy should not be used + GitNoProxy string `json:"gitNoProxy,omitempty"` + + // env is a set of default environment variables that will be applied to the + // build if the specified variables do not exist on the build + Env []corev1.EnvVar `json:"env,omitempty"` + + // sourceStrategyDefaults are default values that apply to builds using the + // source strategy. 
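+ // For example (an illustrative reading of the defaulting behavior), setting
+ // sourceStrategyDefaults.incremental to true makes s2i builds incremental
+ // by default; see SourceStrategyDefaultsConfig below.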
+ SourceStrategyDefaults *SourceStrategyDefaultsConfig `json:"sourceStrategyDefaults,omitempty"` + + // imageLabels is a list of labels that are applied to the resulting image. + // A user can override a default label by providing a label with the same name in their + // Build/BuildConfig. + ImageLabels []buildv1.ImageLabel `json:"imageLabels,omitempty"` + + // nodeSelector is a selector which must be true for the build pod to fit on a node + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // annotations are annotations that will be added to the build pod + Annotations map[string]string `json:"annotations,omitempty"` + + // resources defines resource requirements to execute the build. + Resources corev1.ResourceRequirements `json:"resources,omitempty"` +} + +// SourceStrategyDefaultsConfig contains values that apply to builds using the +// source strategy. +type SourceStrategyDefaultsConfig struct { + + // incremental indicates if s2i build strategies should perform an incremental + // build or not + Incremental *bool `json:"incremental,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BuildOverridesConfig controls override settings for builds +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type BuildOverridesConfig struct { + metav1.TypeMeta `json:",inline"` + + // forcePull indicates whether the build strategy should always be set to ForcePull=true + ForcePull bool `json:"forcePull"` + + // imageLabels is a list of labels that are applied to the resulting image. + // If a user provides a label in their Build/BuildConfig with the same name as one in this + // list, the user's label will be overwritten. + ImageLabels []buildv1.ImageLabel `json:"imageLabels,omitempty"` + + // nodeSelector is a selector which must be true for the build pod to fit on a node + NodeSelector map[string]string `json:"nodeSelector,omitempty"` + + // annotations are annotations that will be added to the build pod + Annotations map[string]string `json:"annotations,omitempty"` + + // tolerations is a list of Tolerations that will override any existing + // tolerations set on a build pod. + Tolerations []corev1.Toleration `json:"tolerations,omitempty"` +} diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..17d717ea4 --- /dev/null +++ b/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.deepcopy.go @@ -0,0 +1,2143 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + buildv1 "github.com/openshift/api/build/v1" + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
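+// Like the other generated DeepCopyInto functions in this file, it copies
+// slice fields with the re-aliasing pattern (in, out := &in.Field, &out.Field)
+// followed by a fresh make and copy, so the clone shares no backing arrays
+// with the receiver.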
+func (in *ActiveDirectoryConfig) DeepCopyInto(out *ActiveDirectoryConfig) { + *out = *in + out.AllUsersQuery = in.AllUsersQuery + if in.UserNameAttributes != nil { + in, out := &in.UserNameAttributes, &out.UserNameAttributes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.GroupMembershipAttributes != nil { + in, out := &in.GroupMembershipAttributes, &out.GroupMembershipAttributes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ActiveDirectoryConfig. +func (in *ActiveDirectoryConfig) DeepCopy() *ActiveDirectoryConfig { + if in == nil { + return nil + } + out := new(ActiveDirectoryConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionConfig) DeepCopyInto(out *AdmissionConfig) { + *out = *in + if in.PluginConfig != nil { + in, out := &in.PluginConfig, &out.PluginConfig + *out = make(map[string]*AdmissionPluginConfig, len(*in)) + for key, val := range *in { + var outVal *AdmissionPluginConfig + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(AdmissionPluginConfig) + (*in).DeepCopyInto(*out) + } + (*out)[key] = outVal + } + } + if in.PluginOrderOverride != nil { + in, out := &in.PluginOrderOverride, &out.PluginOrderOverride + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionConfig. +func (in *AdmissionConfig) DeepCopy() *AdmissionConfig { + if in == nil { + return nil + } + out := new(AdmissionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AdmissionPluginConfig) DeepCopyInto(out *AdmissionPluginConfig) { + *out = *in + in.Configuration.DeepCopyInto(&out.Configuration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionPluginConfig. +func (in *AdmissionPluginConfig) DeepCopy() *AdmissionPluginConfig { + if in == nil { + return nil + } + out := new(AdmissionPluginConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AggregatorConfig) DeepCopyInto(out *AggregatorConfig) { + *out = *in + out.ProxyClientInfo = in.ProxyClientInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregatorConfig. +func (in *AggregatorConfig) DeepCopy() *AggregatorConfig { + if in == nil { + return nil + } + out := new(AggregatorConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowAllPasswordIdentityProvider) DeepCopyInto(out *AllowAllPasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowAllPasswordIdentityProvider. 
+func (in *AllowAllPasswordIdentityProvider) DeepCopy() *AllowAllPasswordIdentityProvider { + if in == nil { + return nil + } + out := new(AllowAllPasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AllowAllPasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in AllowedRegistries) DeepCopyInto(out *AllowedRegistries) { + { + in := &in + *out = make(AllowedRegistries, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedRegistries. +func (in AllowedRegistries) DeepCopy() AllowedRegistries { + if in == nil { + return nil + } + out := new(AllowedRegistries) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditConfig) DeepCopyInto(out *AuditConfig) { + *out = *in + in.PolicyConfiguration.DeepCopyInto(&out.PolicyConfiguration) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditConfig. +func (in *AuditConfig) DeepCopy() *AuditConfig { + if in == nil { + return nil + } + out := new(AuditConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AugmentedActiveDirectoryConfig) DeepCopyInto(out *AugmentedActiveDirectoryConfig) { + *out = *in + out.AllUsersQuery = in.AllUsersQuery + if in.UserNameAttributes != nil { + in, out := &in.UserNameAttributes, &out.UserNameAttributes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.GroupMembershipAttributes != nil { + in, out := &in.GroupMembershipAttributes, &out.GroupMembershipAttributes + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.AllGroupsQuery = in.AllGroupsQuery + if in.GroupNameAttributes != nil { + in, out := &in.GroupNameAttributes, &out.GroupNameAttributes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AugmentedActiveDirectoryConfig. +func (in *AugmentedActiveDirectoryConfig) DeepCopy() *AugmentedActiveDirectoryConfig { + if in == nil { + return nil + } + out := new(AugmentedActiveDirectoryConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicAuthPasswordIdentityProvider) DeepCopyInto(out *BasicAuthPasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.RemoteConnectionInfo = in.RemoteConnectionInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuthPasswordIdentityProvider. +func (in *BasicAuthPasswordIdentityProvider) DeepCopy() *BasicAuthPasswordIdentityProvider { + if in == nil { + return nil + } + out := new(BasicAuthPasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
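+// Returning the copy as a runtime.Object is what satisfies the
+// k8s.io/apimachinery runtime.Object interface requested by the
+// deepcopy-gen interface markers on these types.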
+func (in *BasicAuthPasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildDefaultsConfig) DeepCopyInto(out *BuildDefaultsConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourceStrategyDefaults != nil { + in, out := &in.SourceStrategyDefaults, &out.SourceStrategyDefaults + *out = new(SourceStrategyDefaultsConfig) + (*in).DeepCopyInto(*out) + } + if in.ImageLabels != nil { + in, out := &in.ImageLabels, &out.ImageLabels + *out = make([]buildv1.ImageLabel, len(*in)) + copy(*out, *in) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Resources.DeepCopyInto(&out.Resources) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildDefaultsConfig. +func (in *BuildDefaultsConfig) DeepCopy() *BuildDefaultsConfig { + if in == nil { + return nil + } + out := new(BuildDefaultsConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildDefaultsConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildOverridesConfig) DeepCopyInto(out *BuildOverridesConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.ImageLabels != nil { + in, out := &in.ImageLabels, &out.ImageLabels + *out = make([]buildv1.ImageLabel, len(*in)) + copy(*out, *in) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildOverridesConfig. +func (in *BuildOverridesConfig) DeepCopy() *BuildOverridesConfig { + if in == nil { + return nil + } + out := new(BuildOverridesConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildOverridesConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertInfo) DeepCopyInto(out *CertInfo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertInfo. +func (in *CertInfo) DeepCopy() *CertInfo { + if in == nil { + return nil + } + out := new(CertInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientConnectionOverrides) DeepCopyInto(out *ClientConnectionOverrides) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientConnectionOverrides. +func (in *ClientConnectionOverrides) DeepCopy() *ClientConnectionOverrides { + if in == nil { + return nil + } + out := new(ClientConnectionOverrides) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry. +func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry { + if in == nil { + return nil + } + out := new(ClusterNetworkEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerConfig) DeepCopyInto(out *ControllerConfig) { + *out = *in + if in.Controllers != nil { + in, out := &in.Controllers, &out.Controllers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Election != nil { + in, out := &in.Election, &out.Election + *out = new(ControllerElectionConfig) + **out = **in + } + in.ServiceServingCert.DeepCopyInto(&out.ServiceServingCert) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerConfig. +func (in *ControllerConfig) DeepCopy() *ControllerConfig { + if in == nil { + return nil + } + out := new(ControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerElectionConfig) DeepCopyInto(out *ControllerElectionConfig) { + *out = *in + out.LockResource = in.LockResource + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerElectionConfig. +func (in *ControllerElectionConfig) DeepCopy() *ControllerElectionConfig { + if in == nil { + return nil + } + out := new(ControllerElectionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DNSConfig) DeepCopyInto(out *DNSConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DNSConfig. +func (in *DNSConfig) DeepCopy() *DNSConfig { + if in == nil { + return nil + } + out := new(DNSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DefaultAdmissionConfig) DeepCopyInto(out *DefaultAdmissionConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultAdmissionConfig. 
+func (in *DefaultAdmissionConfig) DeepCopy() *DefaultAdmissionConfig { + if in == nil { + return nil + } + out := new(DefaultAdmissionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DefaultAdmissionConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DenyAllPasswordIdentityProvider) DeepCopyInto(out *DenyAllPasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DenyAllPasswordIdentityProvider. +func (in *DenyAllPasswordIdentityProvider) DeepCopy() *DenyAllPasswordIdentityProvider { + if in == nil { + return nil + } + out := new(DenyAllPasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DenyAllPasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerConfig) DeepCopyInto(out *DockerConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerConfig. +func (in *DockerConfig) DeepCopy() *DockerConfig { + if in == nil { + return nil + } + out := new(DockerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EtcdConfig) DeepCopyInto(out *EtcdConfig) { + *out = *in + in.ServingInfo.DeepCopyInto(&out.ServingInfo) + in.PeerServingInfo.DeepCopyInto(&out.PeerServingInfo) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdConfig. +func (in *EtcdConfig) DeepCopy() *EtcdConfig { + if in == nil { + return nil + } + out := new(EtcdConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EtcdConnectionInfo) DeepCopyInto(out *EtcdConnectionInfo) { + *out = *in + if in.URLs != nil { + in, out := &in.URLs, &out.URLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.CertInfo = in.CertInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdConnectionInfo. +func (in *EtcdConnectionInfo) DeepCopy() *EtcdConnectionInfo { + if in == nil { + return nil + } + out := new(EtcdConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EtcdStorageConfig) DeepCopyInto(out *EtcdStorageConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdStorageConfig. +func (in *EtcdStorageConfig) DeepCopy() *EtcdStorageConfig { + if in == nil { + return nil + } + out := new(EtcdStorageConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
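+// ExtendedArguments is a named map type, so this helper takes the map itself
+// as its receiver and writes into a freshly allocated map of the same size.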
+func (in ExtendedArguments) DeepCopyInto(out *ExtendedArguments) { + { + in := &in + *out = make(ExtendedArguments, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtendedArguments. +func (in ExtendedArguments) DeepCopy() ExtendedArguments { + if in == nil { + return nil + } + out := new(ExtendedArguments) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in FeatureList) DeepCopyInto(out *FeatureList) { + { + in := &in + *out = make(FeatureList, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FeatureList. +func (in FeatureList) DeepCopy() FeatureList { + if in == nil { + return nil + } + out := new(FeatureList) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitHubIdentityProvider) DeepCopyInto(out *GitHubIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ClientSecret = in.ClientSecret + if in.Organizations != nil { + in, out := &in.Organizations, &out.Organizations + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Teams != nil { + in, out := &in.Teams, &out.Teams + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubIdentityProvider. +func (in *GitHubIdentityProvider) DeepCopy() *GitHubIdentityProvider { + if in == nil { + return nil + } + out := new(GitHubIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GitHubIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitLabIdentityProvider) DeepCopyInto(out *GitLabIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ClientSecret = in.ClientSecret + if in.Legacy != nil { + in, out := &in.Legacy, &out.Legacy + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLabIdentityProvider. +func (in *GitLabIdentityProvider) DeepCopy() *GitLabIdentityProvider { + if in == nil { + return nil + } + out := new(GitLabIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GitLabIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GoogleIdentityProvider) DeepCopyInto(out *GoogleIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ClientSecret = in.ClientSecret + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleIdentityProvider. +func (in *GoogleIdentityProvider) DeepCopy() *GoogleIdentityProvider { + if in == nil { + return nil + } + out := new(GoogleIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GoogleIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GrantConfig) DeepCopyInto(out *GrantConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GrantConfig. +func (in *GrantConfig) DeepCopy() *GrantConfig { + if in == nil { + return nil + } + out := new(GrantConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupResource) DeepCopyInto(out *GroupResource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupResource. +func (in *GroupResource) DeepCopy() *GroupResource { + if in == nil { + return nil + } + out := new(GroupResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTPasswdPasswordIdentityProvider) DeepCopyInto(out *HTPasswdPasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTPasswdPasswordIdentityProvider. +func (in *HTPasswdPasswordIdentityProvider) DeepCopy() *HTPasswdPasswordIdentityProvider { + if in == nil { + return nil + } + out := new(HTPasswdPasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HTPasswdPasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPServingInfo) DeepCopyInto(out *HTTPServingInfo) { + *out = *in + in.ServingInfo.DeepCopyInto(&out.ServingInfo) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPServingInfo. +func (in *HTTPServingInfo) DeepCopy() *HTTPServingInfo { + if in == nil { + return nil + } + out := new(HTTPServingInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityProvider) DeepCopyInto(out *IdentityProvider) { + *out = *in + in.Provider.DeepCopyInto(&out.Provider) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProvider. 
+func (in *IdentityProvider) DeepCopy() *IdentityProvider { + if in == nil { + return nil + } + out := new(IdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageConfig) DeepCopyInto(out *ImageConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageConfig. +func (in *ImageConfig) DeepCopy() *ImageConfig { + if in == nil { + return nil + } + out := new(ImageConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicyConfig) DeepCopyInto(out *ImagePolicyConfig) { + *out = *in + if in.AllowedRegistriesForImport != nil { + in, out := &in.AllowedRegistriesForImport, &out.AllowedRegistriesForImport + *out = new(AllowedRegistries) + if **in != nil { + in, out := *in, *out + *out = make([]RegistryLocation, len(*in)) + copy(*out, *in) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyConfig. +func (in *ImagePolicyConfig) DeepCopy() *ImagePolicyConfig { + if in == nil { + return nil + } + out := new(ImagePolicyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JenkinsPipelineConfig) DeepCopyInto(out *JenkinsPipelineConfig) { + *out = *in + if in.AutoProvisionEnabled != nil { + in, out := &in.AutoProvisionEnabled, &out.AutoProvisionEnabled + *out = new(bool) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JenkinsPipelineConfig. +func (in *JenkinsPipelineConfig) DeepCopy() *JenkinsPipelineConfig { + if in == nil { + return nil + } + out := new(JenkinsPipelineConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeystonePasswordIdentityProvider) DeepCopyInto(out *KeystonePasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.RemoteConnectionInfo = in.RemoteConnectionInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeystonePasswordIdentityProvider. +func (in *KeystonePasswordIdentityProvider) DeepCopy() *KeystonePasswordIdentityProvider { + if in == nil { + return nil + } + out := new(KeystonePasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KeystonePasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubeletConnectionInfo) DeepCopyInto(out *KubeletConnectionInfo) { + *out = *in + out.CertInfo = in.CertInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeletConnectionInfo. 
+func (in *KubeletConnectionInfo) DeepCopy() *KubeletConnectionInfo { + if in == nil { + return nil + } + out := new(KubeletConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesMasterConfig) DeepCopyInto(out *KubernetesMasterConfig) { + *out = *in + if in.APILevels != nil { + in, out := &in.APILevels, &out.APILevels + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DisabledAPIGroupVersions != nil { + in, out := &in.DisabledAPIGroupVersions, &out.DisabledAPIGroupVersions + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + out.ProxyClientInfo = in.ProxyClientInfo + if in.APIServerArguments != nil { + in, out := &in.APIServerArguments, &out.APIServerArguments + *out = make(ExtendedArguments, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.ControllerArguments != nil { + in, out := &in.ControllerArguments, &out.ControllerArguments + *out = make(ExtendedArguments, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.SchedulerArguments != nil { + in, out := &in.SchedulerArguments, &out.SchedulerArguments + *out = make(ExtendedArguments, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesMasterConfig. +func (in *KubernetesMasterConfig) DeepCopy() *KubernetesMasterConfig { + if in == nil { + return nil + } + out := new(KubernetesMasterConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LDAPAttributeMapping) DeepCopyInto(out *LDAPAttributeMapping) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreferredUsername != nil { + in, out := &in.PreferredUsername, &out.PreferredUsername + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPAttributeMapping. +func (in *LDAPAttributeMapping) DeepCopy() *LDAPAttributeMapping { + if in == nil { + return nil + } + out := new(LDAPAttributeMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
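The repeated key/value loops in KubernetesMasterConfig above exist because ExtendedArguments is a map[string][]string: assigning a map copies only its header, and even a rebuilt map would still share the inner slices. A small self-contained sketch of the difference (illustrative, not from the patch):

	package main

	import "fmt"

	func main() {
		args := map[string][]string{"v": {"2"}}

		// Assignment copies only the map header.
		alias := args
		alias["v"][0] = "9"
		fmt.Println(args["v"][0]) // 9: both names refer to the same storage

		// The generated loop rebuilds each inner slice, preserving nil
		// entries, so the result shares nothing with the source.
		deep := make(map[string][]string, len(args))
		for k, v := range args {
			if v == nil {
				deep[k] = nil
				continue
			}
			cp := make([]string, len(v))
			copy(cp, v)
			deep[k] = cp
		}
		deep["v"][0] = "2"
		fmt.Println(args["v"][0]) // still 9: the deep copy is independent
	}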
+func (in *LDAPPasswordIdentityProvider) DeepCopyInto(out *LDAPPasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.BindPassword = in.BindPassword + in.Attributes.DeepCopyInto(&out.Attributes) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPPasswordIdentityProvider. +func (in *LDAPPasswordIdentityProvider) DeepCopy() *LDAPPasswordIdentityProvider { + if in == nil { + return nil + } + out := new(LDAPPasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LDAPPasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LDAPQuery) DeepCopyInto(out *LDAPQuery) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPQuery. +func (in *LDAPQuery) DeepCopy() *LDAPQuery { + if in == nil { + return nil + } + out := new(LDAPQuery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LDAPSyncConfig) DeepCopyInto(out *LDAPSyncConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + out.BindPassword = in.BindPassword + if in.LDAPGroupUIDToOpenShiftGroupNameMapping != nil { + in, out := &in.LDAPGroupUIDToOpenShiftGroupNameMapping, &out.LDAPGroupUIDToOpenShiftGroupNameMapping + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.RFC2307Config != nil { + in, out := &in.RFC2307Config, &out.RFC2307Config + *out = new(RFC2307Config) + (*in).DeepCopyInto(*out) + } + if in.ActiveDirectoryConfig != nil { + in, out := &in.ActiveDirectoryConfig, &out.ActiveDirectoryConfig + *out = new(ActiveDirectoryConfig) + (*in).DeepCopyInto(*out) + } + if in.AugmentedActiveDirectoryConfig != nil { + in, out := &in.AugmentedActiveDirectoryConfig, &out.AugmentedActiveDirectoryConfig + *out = new(AugmentedActiveDirectoryConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPSyncConfig. +func (in *LDAPSyncConfig) DeepCopy() *LDAPSyncConfig { + if in == nil { + return nil + } + out := new(LDAPSyncConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LDAPSyncConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalQuota) DeepCopyInto(out *LocalQuota) { + *out = *in + if in.PerFSGroup != nil { + in, out := &in.PerFSGroup, &out.PerFSGroup + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalQuota. +func (in *LocalQuota) DeepCopy() *LocalQuota { + if in == nil { + return nil + } + out := new(LocalQuota) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
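LocalQuota's PerFSGroup branch above looks different from the rest (`x := (*in).DeepCopy(); *out = &x`) because the field is a pointer to a type whose hand-written DeepCopy returns a value rather than filling a pointer (resource.Quantity upstream), so the generator copies into a temporary and takes its address. The same idiom in isolation, assuming the usual apimachinery import:

	package main

	import (
		"fmt"

		"k8s.io/apimachinery/pkg/api/resource"
	)

	func main() {
		q := resource.MustParse("512Mi")
		src := &q

		// Quantity's DeepCopy returns a value, not a pointer, so the
		// generated code copies into a temporary and takes its address.
		x := (*src).DeepCopy()
		dst := &x

		fmt.Println(src == dst, dst.String()) // false 512Mi
	}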
+func (in *MasterAuthConfig) DeepCopyInto(out *MasterAuthConfig) { + *out = *in + if in.RequestHeader != nil { + in, out := &in.RequestHeader, &out.RequestHeader + *out = new(RequestHeaderAuthenticationOptions) + (*in).DeepCopyInto(*out) + } + if in.WebhookTokenAuthenticators != nil { + in, out := &in.WebhookTokenAuthenticators, &out.WebhookTokenAuthenticators + *out = make([]WebhookTokenAuthenticator, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterAuthConfig. +func (in *MasterAuthConfig) DeepCopy() *MasterAuthConfig { + if in == nil { + return nil + } + out := new(MasterAuthConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterClients) DeepCopyInto(out *MasterClients) { + *out = *in + if in.OpenShiftLoopbackClientConnectionOverrides != nil { + in, out := &in.OpenShiftLoopbackClientConnectionOverrides, &out.OpenShiftLoopbackClientConnectionOverrides + *out = new(ClientConnectionOverrides) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterClients. +func (in *MasterClients) DeepCopy() *MasterClients { + if in == nil { + return nil + } + out := new(MasterClients) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterConfig) DeepCopyInto(out *MasterConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ServingInfo.DeepCopyInto(&out.ServingInfo) + in.AuthConfig.DeepCopyInto(&out.AuthConfig) + out.AggregatorConfig = in.AggregatorConfig + if in.CORSAllowedOrigins != nil { + in, out := &in.CORSAllowedOrigins, &out.CORSAllowedOrigins + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APILevels != nil { + in, out := &in.APILevels, &out.APILevels + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.AdmissionConfig.DeepCopyInto(&out.AdmissionConfig) + in.ControllerConfig.DeepCopyInto(&out.ControllerConfig) + out.EtcdStorageConfig = in.EtcdStorageConfig + in.EtcdClientInfo.DeepCopyInto(&out.EtcdClientInfo) + out.KubeletClientInfo = in.KubeletClientInfo + in.KubernetesMasterConfig.DeepCopyInto(&out.KubernetesMasterConfig) + if in.EtcdConfig != nil { + in, out := &in.EtcdConfig, &out.EtcdConfig + *out = new(EtcdConfig) + (*in).DeepCopyInto(*out) + } + if in.OAuthConfig != nil { + in, out := &in.OAuthConfig, &out.OAuthConfig + *out = new(OAuthConfig) + (*in).DeepCopyInto(*out) + } + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + *out = new(DNSConfig) + **out = **in + } + in.ServiceAccountConfig.DeepCopyInto(&out.ServiceAccountConfig) + in.MasterClients.DeepCopyInto(&out.MasterClients) + out.ImageConfig = in.ImageConfig + in.ImagePolicyConfig.DeepCopyInto(&out.ImagePolicyConfig) + in.PolicyConfig.DeepCopyInto(&out.PolicyConfig) + in.ProjectConfig.DeepCopyInto(&out.ProjectConfig) + out.RoutingConfig = in.RoutingConfig + in.NetworkConfig.DeepCopyInto(&out.NetworkConfig) + in.VolumeConfig.DeepCopyInto(&out.VolumeConfig) + in.JenkinsPipelineConfig.DeepCopyInto(&out.JenkinsPipelineConfig) + in.AuditConfig.DeepCopyInto(&out.AuditConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterConfig. 
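Note the contrast inside MasterClients above: OpenShiftLoopbackClientConnectionOverrides is copied with a plain `**out = **in`, which the generator emits only when the pointed-to struct holds nothing but scalars, so a dereferencing assignment is already a deep copy. A standalone sketch with a hypothetical flat struct (field names borrowed from the ClientConnectionOverrides Swagger docs below):

	package main

	import "fmt"

	// Overrides is a hypothetical flat stand-in for ClientConnectionOverrides:
	// scalar fields only, so dereferencing assignment is a deep copy.
	type Overrides struct {
		QPS   float32
		Burst int32
	}

	func main() {
		in := &Overrides{QPS: 50, Burst: 100}

		// The generated idiom for a pointer to a flat struct:
		out := new(Overrides)
		*out = *in

		out.Burst = 1
		fmt.Println(in.Burst) // 100: no field is a pointer, map, or slice
	}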
+func (in *MasterConfig) DeepCopy() *MasterConfig { + if in == nil { + return nil + } + out := new(MasterConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MasterConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterNetworkConfig) DeepCopyInto(out *MasterNetworkConfig) { + *out = *in + if in.ClusterNetworks != nil { + in, out := &in.ClusterNetworks, &out.ClusterNetworks + *out = make([]ClusterNetworkEntry, len(*in)) + copy(*out, *in) + } + if in.ExternalIPNetworkCIDRs != nil { + in, out := &in.ExternalIPNetworkCIDRs, &out.ExternalIPNetworkCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterNetworkConfig. +func (in *MasterNetworkConfig) DeepCopy() *MasterNetworkConfig { + if in == nil { + return nil + } + out := new(MasterNetworkConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MasterVolumeConfig) DeepCopyInto(out *MasterVolumeConfig) { + *out = *in + if in.DynamicProvisioningEnabled != nil { + in, out := &in.DynamicProvisioningEnabled, &out.DynamicProvisioningEnabled + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MasterVolumeConfig. +func (in *MasterVolumeConfig) DeepCopy() *MasterVolumeConfig { + if in == nil { + return nil + } + out := new(MasterVolumeConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedCertificate) DeepCopyInto(out *NamedCertificate) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.CertInfo = in.CertInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedCertificate. +func (in *NamedCertificate) DeepCopy() *NamedCertificate { + if in == nil { + return nil + } + out := new(NamedCertificate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeAuthConfig) DeepCopyInto(out *NodeAuthConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAuthConfig. +func (in *NodeAuthConfig) DeepCopy() *NodeAuthConfig { + if in == nil { + return nil + } + out := new(NodeAuthConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
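The DeepCopyObject variants exist so that type-agnostic machinery (schemes, caches, informers) can clone values it only knows as runtime.Object. A minimal sketch, assuming the standard apimachinery import and the vendored legacyconfig package:

	package main

	import (
		"fmt"

		legacyconfigv1 "github.com/openshift/api/legacyconfig/v1"
		"k8s.io/apimachinery/pkg/runtime"
	)

	// cloneAll deep-copies objects without knowing any concrete type; this
	// is the contract the generated DeepCopyObject methods satisfy.
	func cloneAll(objs []runtime.Object) []runtime.Object {
		out := make([]runtime.Object, 0, len(objs))
		for _, o := range objs {
			out = append(out, o.DeepCopyObject())
		}
		return out
	}

	func main() {
		objs := []runtime.Object{&legacyconfigv1.MasterConfig{}}
		fmt.Println(len(cloneAll(objs))) // 1
	}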
+func (in *NodeConfig) DeepCopyInto(out *NodeConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ServingInfo.DeepCopyInto(&out.ServingInfo) + if in.MasterClientConnectionOverrides != nil { + in, out := &in.MasterClientConnectionOverrides, &out.MasterClientConnectionOverrides + *out = new(ClientConnectionOverrides) + **out = **in + } + if in.DNSNameservers != nil { + in, out := &in.DNSNameservers, &out.DNSNameservers + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.NetworkConfig = in.NetworkConfig + out.ImageConfig = in.ImageConfig + if in.PodManifestConfig != nil { + in, out := &in.PodManifestConfig, &out.PodManifestConfig + *out = new(PodManifestConfig) + **out = **in + } + out.AuthConfig = in.AuthConfig + out.DockerConfig = in.DockerConfig + if in.KubeletArguments != nil { + in, out := &in.KubeletArguments, &out.KubeletArguments + *out = make(ExtendedArguments, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.ProxyArguments != nil { + in, out := &in.ProxyArguments, &out.ProxyArguments + *out = make(ExtendedArguments, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.EnableUnidling != nil { + in, out := &in.EnableUnidling, &out.EnableUnidling + *out = new(bool) + **out = **in + } + in.VolumeConfig.DeepCopyInto(&out.VolumeConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfig. +func (in *NodeConfig) DeepCopy() *NodeConfig { + if in == nil { + return nil + } + out := new(NodeConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeNetworkConfig) DeepCopyInto(out *NodeNetworkConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeNetworkConfig. +func (in *NodeNetworkConfig) DeepCopy() *NodeNetworkConfig { + if in == nil { + return nil + } + out := new(NodeNetworkConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeVolumeConfig) DeepCopyInto(out *NodeVolumeConfig) { + *out = *in + in.LocalQuota.DeepCopyInto(&out.LocalQuota) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeVolumeConfig. +func (in *NodeVolumeConfig) DeepCopy() *NodeVolumeConfig { + if in == nil { + return nil + } + out := new(NodeVolumeConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OAuthConfig) DeepCopyInto(out *OAuthConfig) { + *out = *in + if in.MasterCA != nil { + in, out := &in.MasterCA, &out.MasterCA + *out = new(string) + **out = **in + } + if in.IdentityProviders != nil { + in, out := &in.IdentityProviders, &out.IdentityProviders + *out = make([]IdentityProvider, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.GrantConfig = in.GrantConfig + if in.SessionConfig != nil { + in, out := &in.SessionConfig, &out.SessionConfig + *out = new(SessionConfig) + **out = **in + } + in.TokenConfig.DeepCopyInto(&out.TokenConfig) + if in.Templates != nil { + in, out := &in.Templates, &out.Templates + *out = new(OAuthTemplates) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthConfig. +func (in *OAuthConfig) DeepCopy() *OAuthConfig { + if in == nil { + return nil + } + out := new(OAuthConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthTemplates) DeepCopyInto(out *OAuthTemplates) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthTemplates. +func (in *OAuthTemplates) DeepCopy() *OAuthTemplates { + if in == nil { + return nil + } + out := new(OAuthTemplates) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDClaims) DeepCopyInto(out *OpenIDClaims) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreferredUsername != nil { + in, out := &in.PreferredUsername, &out.PreferredUsername + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDClaims. +func (in *OpenIDClaims) DeepCopy() *OpenIDClaims { + if in == nil { + return nil + } + out := new(OpenIDClaims) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDIdentityProvider) DeepCopyInto(out *OpenIDIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ClientSecret = in.ClientSecret + if in.ExtraScopes != nil { + in, out := &in.ExtraScopes, &out.ExtraScopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExtraAuthorizeParameters != nil { + in, out := &in.ExtraAuthorizeParameters, &out.ExtraAuthorizeParameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.URLs = in.URLs + in.Claims.DeepCopyInto(&out.Claims) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDIdentityProvider. +func (in *OpenIDIdentityProvider) DeepCopy() *OpenIDIdentityProvider { + if in == nil { + return nil + } + out := new(OpenIDIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *OpenIDIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDURLs) DeepCopyInto(out *OpenIDURLs) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDURLs. +func (in *OpenIDURLs) DeepCopy() *OpenIDURLs { + if in == nil { + return nil + } + out := new(OpenIDURLs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodManifestConfig) DeepCopyInto(out *PodManifestConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodManifestConfig. +func (in *PodManifestConfig) DeepCopy() *PodManifestConfig { + if in == nil { + return nil + } + out := new(PodManifestConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyConfig) DeepCopyInto(out *PolicyConfig) { + *out = *in + in.UserAgentMatchingConfig.DeepCopyInto(&out.UserAgentMatchingConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyConfig. +func (in *PolicyConfig) DeepCopy() *PolicyConfig { + if in == nil { + return nil + } + out := new(PolicyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectConfig) DeepCopyInto(out *ProjectConfig) { + *out = *in + if in.SecurityAllocator != nil { + in, out := &in.SecurityAllocator, &out.SecurityAllocator + *out = new(SecurityAllocator) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectConfig. +func (in *ProjectConfig) DeepCopy() *ProjectConfig { + if in == nil { + return nil + } + out := new(ProjectConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RFC2307Config) DeepCopyInto(out *RFC2307Config) { + *out = *in + out.AllGroupsQuery = in.AllGroupsQuery + if in.GroupNameAttributes != nil { + in, out := &in.GroupNameAttributes, &out.GroupNameAttributes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.GroupMembershipAttributes != nil { + in, out := &in.GroupMembershipAttributes, &out.GroupMembershipAttributes + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.AllUsersQuery = in.AllUsersQuery + if in.UserNameAttributes != nil { + in, out := &in.UserNameAttributes, &out.UserNameAttributes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RFC2307Config. +func (in *RFC2307Config) DeepCopy() *RFC2307Config { + if in == nil { + return nil + } + out := new(RFC2307Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RegistryLocation) DeepCopyInto(out *RegistryLocation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryLocation. +func (in *RegistryLocation) DeepCopy() *RegistryLocation { + if in == nil { + return nil + } + out := new(RegistryLocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RemoteConnectionInfo) DeepCopyInto(out *RemoteConnectionInfo) { + *out = *in + out.CertInfo = in.CertInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RemoteConnectionInfo. +func (in *RemoteConnectionInfo) DeepCopy() *RemoteConnectionInfo { + if in == nil { + return nil + } + out := new(RemoteConnectionInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestHeaderAuthenticationOptions) DeepCopyInto(out *RequestHeaderAuthenticationOptions) { + *out = *in + if in.ClientCommonNames != nil { + in, out := &in.ClientCommonNames, &out.ClientCommonNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.UsernameHeaders != nil { + in, out := &in.UsernameHeaders, &out.UsernameHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.GroupHeaders != nil { + in, out := &in.GroupHeaders, &out.GroupHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExtraHeaderPrefixes != nil { + in, out := &in.ExtraHeaderPrefixes, &out.ExtraHeaderPrefixes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderAuthenticationOptions. +func (in *RequestHeaderAuthenticationOptions) DeepCopy() *RequestHeaderAuthenticationOptions { + if in == nil { + return nil + } + out := new(RequestHeaderAuthenticationOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestHeaderIdentityProvider) DeepCopyInto(out *RequestHeaderIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.ClientCommonNames != nil { + in, out := &in.ClientCommonNames, &out.ClientCommonNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreferredUsernameHeaders != nil { + in, out := &in.PreferredUsernameHeaders, &out.PreferredUsernameHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NameHeaders != nil { + in, out := &in.NameHeaders, &out.NameHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.EmailHeaders != nil { + in, out := &in.EmailHeaders, &out.EmailHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderIdentityProvider. +func (in *RequestHeaderIdentityProvider) DeepCopy() *RequestHeaderIdentityProvider { + if in == nil { + return nil + } + out := new(RequestHeaderIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *RequestHeaderIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoutingConfig) DeepCopyInto(out *RoutingConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingConfig. +func (in *RoutingConfig) DeepCopy() *RoutingConfig { + if in == nil { + return nil + } + out := new(RoutingConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityAllocator) DeepCopyInto(out *SecurityAllocator) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityAllocator. +func (in *SecurityAllocator) DeepCopy() *SecurityAllocator { + if in == nil { + return nil + } + out := new(SecurityAllocator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountConfig) DeepCopyInto(out *ServiceAccountConfig) { + *out = *in + if in.ManagedNames != nil { + in, out := &in.ManagedNames, &out.ManagedNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PublicKeyFiles != nil { + in, out := &in.PublicKeyFiles, &out.PublicKeyFiles + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountConfig. +func (in *ServiceAccountConfig) DeepCopy() *ServiceAccountConfig { + if in == nil { + return nil + } + out := new(ServiceAccountConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceServingCert) DeepCopyInto(out *ServiceServingCert) { + *out = *in + if in.Signer != nil { + in, out := &in.Signer, &out.Signer + *out = new(CertInfo) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceServingCert. +func (in *ServiceServingCert) DeepCopy() *ServiceServingCert { + if in == nil { + return nil + } + out := new(ServiceServingCert) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServingInfo) DeepCopyInto(out *ServingInfo) { + *out = *in + out.CertInfo = in.CertInfo + if in.NamedCertificates != nil { + in, out := &in.NamedCertificates, &out.NamedCertificates + *out = make([]NamedCertificate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CipherSuites != nil { + in, out := &in.CipherSuites, &out.CipherSuites + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServingInfo. +func (in *ServingInfo) DeepCopy() *ServingInfo { + if in == nil { + return nil + } + out := new(ServingInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
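ServingInfo above shows both slice strategies side by side: CipherSuites ([]string) gets a single copy(), while NamedCertificates needs an element-wise loop because each element carries its own Names slice. A self-contained sketch of why copy() alone is not enough for such element types (Cert here is a hypothetical stand-in for NamedCertificate):

	package main

	import "fmt"

	// Cert is a hypothetical stand-in for NamedCertificate, whose elements
	// carry their own Names slice.
	type Cert struct {
		Names []string
	}

	func main() {
		in := []Cert{{Names: []string{"a.example.com"}}}

		// copy() duplicates the outer slice, but each element's Names slice
		// stays shared:
		shallow := make([]Cert, len(in))
		copy(shallow, in)
		shallow[0].Names[0] = "b.example.com"
		fmt.Println(in[0].Names[0]) // b.example.com: aliased through the copy

		// Hence the element-wise loop for such types, while flat []string
		// fields (like CipherSuites) get a single copy().
		in[0].Names[0] = "a.example.com"
		deep := make([]Cert, len(in))
		for i := range in {
			names := make([]string, len(in[i].Names))
			copy(names, in[i].Names)
			deep[i] = Cert{Names: names}
		}
		deep[0].Names[0] = "c.example.com"
		fmt.Println(in[0].Names[0]) // a.example.com: fully independent
	}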
+func (in *SessionConfig) DeepCopyInto(out *SessionConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionConfig. +func (in *SessionConfig) DeepCopy() *SessionConfig { + if in == nil { + return nil + } + out := new(SessionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SessionSecret) DeepCopyInto(out *SessionSecret) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionSecret. +func (in *SessionSecret) DeepCopy() *SessionSecret { + if in == nil { + return nil + } + out := new(SessionSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SessionSecrets) DeepCopyInto(out *SessionSecrets) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SessionSecret, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionSecrets. +func (in *SessionSecrets) DeepCopy() *SessionSecrets { + if in == nil { + return nil + } + out := new(SessionSecrets) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SessionSecrets) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceStrategyDefaultsConfig) DeepCopyInto(out *SourceStrategyDefaultsConfig) { + *out = *in + if in.Incremental != nil { + in, out := &in.Incremental, &out.Incremental + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceStrategyDefaultsConfig. +func (in *SourceStrategyDefaultsConfig) DeepCopy() *SourceStrategyDefaultsConfig { + if in == nil { + return nil + } + out := new(SourceStrategyDefaultsConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StringSource) DeepCopyInto(out *StringSource) { + *out = *in + out.StringSourceSpec = in.StringSourceSpec + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringSource. +func (in *StringSource) DeepCopy() *StringSource { + if in == nil { + return nil + } + out := new(StringSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StringSourceSpec) DeepCopyInto(out *StringSourceSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StringSourceSpec. +func (in *StringSourceSpec) DeepCopy() *StringSourceSpec { + if in == nil { + return nil + } + out := new(StringSourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TokenConfig) DeepCopyInto(out *TokenConfig) { + *out = *in + if in.AccessTokenInactivityTimeoutSeconds != nil { + in, out := &in.AccessTokenInactivityTimeoutSeconds, &out.AccessTokenInactivityTimeoutSeconds + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenConfig. +func (in *TokenConfig) DeepCopy() *TokenConfig { + if in == nil { + return nil + } + out := new(TokenConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserAgentDenyRule) DeepCopyInto(out *UserAgentDenyRule) { + *out = *in + in.UserAgentMatchRule.DeepCopyInto(&out.UserAgentMatchRule) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserAgentDenyRule. +func (in *UserAgentDenyRule) DeepCopy() *UserAgentDenyRule { + if in == nil { + return nil + } + out := new(UserAgentDenyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserAgentMatchRule) DeepCopyInto(out *UserAgentMatchRule) { + *out = *in + if in.HTTPVerbs != nil { + in, out := &in.HTTPVerbs, &out.HTTPVerbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserAgentMatchRule. +func (in *UserAgentMatchRule) DeepCopy() *UserAgentMatchRule { + if in == nil { + return nil + } + out := new(UserAgentMatchRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserAgentMatchingConfig) DeepCopyInto(out *UserAgentMatchingConfig) { + *out = *in + if in.RequiredClients != nil { + in, out := &in.RequiredClients, &out.RequiredClients + *out = make([]UserAgentMatchRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DeniedClients != nil { + in, out := &in.DeniedClients, &out.DeniedClients + *out = make([]UserAgentDenyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserAgentMatchingConfig. +func (in *UserAgentMatchingConfig) DeepCopy() *UserAgentMatchingConfig { + if in == nil { + return nil + } + out := new(UserAgentMatchingConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookTokenAuthenticator) DeepCopyInto(out *WebhookTokenAuthenticator) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookTokenAuthenticator. 
+func (in *WebhookTokenAuthenticator) DeepCopy() *WebhookTokenAuthenticator { + if in == nil { + return nil + } + out := new(WebhookTokenAuthenticator) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..29269b1a3 --- /dev/null +++ b/vendor/github.com/openshift/api/legacyconfig/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,977 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_ActiveDirectoryConfig = map[string]string{ + "": "ActiveDirectoryConfig holds the necessary configuration options to define how an LDAP group sync interacts with an LDAP server using the Active Directory schema", + "usersQuery": "AllUsersQuery holds the template for an LDAP query that returns user entries.", + "userNameAttributes": "UserNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name.", + "groupMembershipAttributes": "GroupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted as the groups it is a member of", +} + +func (ActiveDirectoryConfig) SwaggerDoc() map[string]string { + return map_ActiveDirectoryConfig +} + +var map_AdmissionConfig = map[string]string{ + "": "AdmissionConfig holds the necessary configuration options for admission", + "pluginConfig": "PluginConfig allows specifying a configuration file per admission control plugin", + "pluginOrderOverride": "PluginOrderOverride is a list of admission control plugin names that will be installed on the master. Order is significant. If empty, a default list of plugins is used.", +} + +func (AdmissionConfig) SwaggerDoc() map[string]string { + return map_AdmissionConfig +} + +var map_AdmissionPluginConfig = map[string]string{ + "": "AdmissionPluginConfig holds the necessary configuration options for admission plugins", + "location": "Location is the path to a configuration file that contains the plugin's configuration", + "configuration": "Configuration is an embedded configuration object to be used as the plugin's configuration. 
If present, it will be used instead of the path to the configuration file.", +} + +func (AdmissionPluginConfig) SwaggerDoc() map[string]string { + return map_AdmissionPluginConfig +} + +var map_AggregatorConfig = map[string]string{ + "": "AggregatorConfig holds information required to make the aggregator function.", + "proxyClientInfo": "ProxyClientInfo specifies the client cert/key to use when proxying to aggregated API servers", +} + +func (AggregatorConfig) SwaggerDoc() map[string]string { + return map_AggregatorConfig +} + +var map_AllowAllPasswordIdentityProvider = map[string]string{ + "": "AllowAllPasswordIdentityProvider provides identities for users authenticating using non-empty passwords\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", +} + +func (AllowAllPasswordIdentityProvider) SwaggerDoc() map[string]string { + return map_AllowAllPasswordIdentityProvider +} + +var map_AuditConfig = map[string]string{ + "": "AuditConfig holds configuration for the audit capabilities", + "enabled": "If this flag is set, audit log will be printed in the logs. The logs contains, method, user and a requested URL.", + "auditFilePath": "All requests coming to the apiserver will be logged to this file.", + "maximumFileRetentionDays": "Maximum number of days to retain old log files based on the timestamp encoded in their filename.", + "maximumRetainedFiles": "Maximum number of old log files to retain.", + "maximumFileSizeMegabytes": "Maximum size in megabytes of the log file before it gets rotated. Defaults to 100MB.", + "policyFile": "PolicyFile is a path to the file that defines the audit policy configuration.", + "policyConfiguration": "PolicyConfiguration is an embedded policy configuration object to be used as the audit policy configuration. If present, it will be used instead of the path to the policy file.", + "logFormat": "Format of saved audits (legacy or json).", + "webHookKubeConfig": "Path to a .kubeconfig formatted file that defines the audit webhook configuration.", + "webHookMode": "Strategy for sending audit events (block or batch).", +} + +func (AuditConfig) SwaggerDoc() map[string]string { + return map_AuditConfig +} + +var map_AugmentedActiveDirectoryConfig = map[string]string{ + "": "AugmentedActiveDirectoryConfig holds the necessary configuration options to define how an LDAP group sync interacts with an LDAP server using the augmented Active Directory schema", + "usersQuery": "AllUsersQuery holds the template for an LDAP query that returns user entries.", + "userNameAttributes": "UserNameAttributes defines which attributes on an LDAP user entry will be interpreted as its OpenShift user name.", + "groupMembershipAttributes": "GroupMembershipAttributes defines which attributes on an LDAP user entry will be interpreted as the groups it is a member of", + "groupsQuery": "AllGroupsQuery holds the template for an LDAP query that returns group entries.", + "groupUIDAttribute": "GroupUIDAttributes defines which attribute on an LDAP group entry will be interpreted as its unique identifier. 
(ldapGroupUID)", + "groupNameAttributes": "GroupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for an OpenShift group", +} + +func (AugmentedActiveDirectoryConfig) SwaggerDoc() map[string]string { + return map_AugmentedActiveDirectoryConfig +} + +var map_BasicAuthPasswordIdentityProvider = map[string]string{ + "": "BasicAuthPasswordIdentityProvider provides identities for users authenticating using HTTP basic auth credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", +} + +func (BasicAuthPasswordIdentityProvider) SwaggerDoc() map[string]string { + return map_BasicAuthPasswordIdentityProvider +} + +var map_BuildDefaultsConfig = map[string]string{ + "": "BuildDefaultsConfig controls the default information for Builds\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "gitHTTPProxy": "gitHTTPProxy is the location of the HTTPProxy for Git source", + "gitHTTPSProxy": "gitHTTPSProxy is the location of the HTTPSProxy for Git source", + "gitNoProxy": "gitNoProxy is the list of domains for which the proxy should not be used", + "env": "env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build", + "sourceStrategyDefaults": "sourceStrategyDefaults are default values that apply to builds using the source strategy.", + "imageLabels": "imageLabels is a list of labels that are applied to the resulting image. User can override a default label by providing a label with the same name in their Build/BuildConfig.", + "nodeSelector": "nodeSelector is a selector which must be true for the build pod to fit on a node", + "annotations": "annotations are annotations that will be added to the build pod", + "resources": "resources defines resource requirements to execute the build.", +} + +func (BuildDefaultsConfig) SwaggerDoc() map[string]string { + return map_BuildDefaultsConfig +} + +var map_BuildOverridesConfig = map[string]string{ + "": "BuildOverridesConfig controls override settings for builds\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "forcePull": "forcePull indicates whether the build strategy should always be set to ForcePull=true", + "imageLabels": "imageLabels is a list of labels that are applied to the resulting image. 
If user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.", + "nodeSelector": "nodeSelector is a selector which must be true for the build pod to fit on a node", + "annotations": "annotations are annotations that will be added to the build pod", + "tolerations": "tolerations is a list of Tolerations that will override any existing tolerations set on a build pod.", +} + +func (BuildOverridesConfig) SwaggerDoc() map[string]string { + return map_BuildOverridesConfig +} + +var map_CertInfo = map[string]string{ + "": "CertInfo relates a certificate with a private key", + "certFile": "CertFile is a file containing a PEM-encoded certificate", + "keyFile": "KeyFile is a file containing a PEM-encoded private key for the certificate specified by CertFile", +} + +func (CertInfo) SwaggerDoc() map[string]string { + return map_CertInfo +} + +var map_ClientConnectionOverrides = map[string]string{ + "": "ClientConnectionOverrides are a set of overrides to the default client connection settings.", + "acceptContentTypes": "AcceptContentTypes defines the Accept header sent by clients when connecting to a server, overriding the default value of 'application/json'. This field will control all connections to the server used by a particular client.", + "contentType": "ContentType is the content type used when sending data to the server from this client.", + "qps": "QPS controls the number of queries per second allowed for this connection.", + "burst": "Burst allows extra queries to accumulate when a client is exceeding its rate.", +} + +func (ClientConnectionOverrides) SwaggerDoc() map[string]string { + return map_ClientConnectionOverrides +} + +var map_ClusterNetworkEntry = map[string]string{ + "": "ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips.", + "cidr": "CIDR defines the total range of a cluster networks address space.", + "hostSubnetLength": "HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pod.", +} + +func (ClusterNetworkEntry) SwaggerDoc() map[string]string { + return map_ClusterNetworkEntry +} + +var map_ControllerConfig = map[string]string{ + "": "ControllerConfig holds configuration values for controllers", + "controllers": "Controllers is a list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller \"+ named 'foo', '-foo' disables the controller named 'foo'. Defaults to \"*\".", + "election": "Election defines the configuration for electing a controller instance to make changes to the cluster. 
If unspecified, the ControllerTTL value is checked to determine whether the legacy direct etcd election code will be used.", + "serviceServingCert": "ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for pods fulfilling a service to serve with.", +} + +func (ControllerConfig) SwaggerDoc() map[string]string { + return map_ControllerConfig +} + +var map_ControllerElectionConfig = map[string]string{ + "": "ControllerElectionConfig contains configuration values for deciding how a controller will be elected to act as leader.", + "lockName": "LockName is the resource name used to act as the lock for determining which controller instance should lead.", + "lockNamespace": "LockNamespace is the resource namespace used to act as the lock for determining which controller instance should lead. It defaults to \"kube-system\"", + "lockResource": "LockResource is the group and resource name to use to coordinate for the controller lock. If unset, defaults to \"configmaps\".", +} + +func (ControllerElectionConfig) SwaggerDoc() map[string]string { + return map_ControllerElectionConfig +} + +var map_DNSConfig = map[string]string{ + "": "DNSConfig holds the necessary configuration options for DNS", + "bindAddress": "BindAddress is the ip:port to serve DNS on", + "bindNetwork": "BindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", + "allowRecursiveQueries": "AllowRecursiveQueries allows the DNS server on the master to answer queries recursively. Note that open resolvers can be used for DNS amplification attacks and the master DNS should not be made accessible to public networks.", +} + +func (DNSConfig) SwaggerDoc() map[string]string { + return map_DNSConfig +} + +var map_DefaultAdmissionConfig = map[string]string{ + "": "DefaultAdmissionConfig can be used to enable or disable various admission plugins. When this type is present as the `configuration` object under `pluginConfig` and *if* the admission plugin supports it, this will cause an \"off by default\" admission plugin to be enabled\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "disable": "Disable turns off an admission plugin that is enabled by default.", +} + +func (DefaultAdmissionConfig) SwaggerDoc() map[string]string { + return map_DefaultAdmissionConfig +} + +var map_DenyAllPasswordIdentityProvider = map[string]string{ + "": "DenyAllPasswordIdentityProvider provides no identities for users\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", +} + +func (DenyAllPasswordIdentityProvider) SwaggerDoc() map[string]string { + return map_DenyAllPasswordIdentityProvider +} + +var map_DockerConfig = map[string]string{ + "": "DockerConfig holds Docker related configuration options.", + "execHandlerName": "ExecHandlerName is the name of the handler to use for executing commands in containers.", + "dockerShimSocket": "DockerShimSocket is the location of the dockershim socket the kubelet uses. Currently unix socket is supported on Linux, and tcp is supported on windows. 
Examples:'unix:///var/run/dockershim.sock', 'tcp://localhost:3735'", + "dockerShimRootDirectory": "DockershimRootDirectory is the dockershim root directory.", +} + +func (DockerConfig) SwaggerDoc() map[string]string { + return map_DockerConfig +} + +var map_EtcdConfig = map[string]string{ + "": "EtcdConfig holds the necessary configuration options for connecting with an etcd database", + "servingInfo": "ServingInfo describes how to start serving the etcd master", + "address": "Address is the advertised host:port for client connections to etcd", + "peerServingInfo": "PeerServingInfo describes how to start serving the etcd peer", + "peerAddress": "PeerAddress is the advertised host:port for peer connections to etcd", + "storageDirectory": "StorageDir is the path to the etcd storage directory", +} + +func (EtcdConfig) SwaggerDoc() map[string]string { + return map_EtcdConfig +} + +var map_EtcdConnectionInfo = map[string]string{ + "": "EtcdConnectionInfo holds information necessary for connecting to an etcd server", + "urls": "URLs are the URLs for etcd", + "ca": "CA is a file containing trusted roots for the etcd server certificates", +} + +func (EtcdConnectionInfo) SwaggerDoc() map[string]string { + return map_EtcdConnectionInfo +} + +var map_EtcdStorageConfig = map[string]string{ + "": "EtcdStorageConfig holds the necessary configuration options for the etcd storage underlying OpenShift and Kubernetes", + "kubernetesStorageVersion": "KubernetesStorageVersion is the API version that Kube resources in etcd should be serialized to. This value should *not* be advanced until all clients in the cluster that read from etcd have code that allows them to read the new version.", + "kubernetesStoragePrefix": "KubernetesStoragePrefix is the path within etcd that the Kubernetes resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located. The default value is 'kubernetes.io'.", + "openShiftStorageVersion": "OpenShiftStorageVersion is the API version that OS resources in etcd should be serialized to. This value should *not* be advanced until all clients in the cluster that read from etcd have code that allows them to read the new version.", + "openShiftStoragePrefix": "OpenShiftStoragePrefix is the path within etcd that the OpenShift resources will be rooted under. This value, if changed, will mean existing objects in etcd will no longer be located. The default value is 'openshift.io'.", +} + +func (EtcdStorageConfig) SwaggerDoc() map[string]string { + return map_EtcdStorageConfig +} + +var map_GitHubIdentityProvider = map[string]string{ + "": "GitHubIdentityProvider provides identities for users authenticating using GitHub credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "clientID": "ClientID is the oauth client ID", + "clientSecret": "ClientSecret is the oauth client secret", + "organizations": "Organizations optionally restricts which organizations are allowed to log in", + "teams": "Teams optionally restricts which teams are allowed to log in. Format is <org>/<team>.", + "hostname": "Hostname is the optional domain (e.g. \"mycompany.com\") for use with a hosted instance of GitHub Enterprise. It must match the GitHub Enterprise settings value that is configured at /setup/settings#hostname.", + "ca": "CA is the optional trusted certificate authority bundle to use when making requests to the server.
If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value.", +} + +func (GitHubIdentityProvider) SwaggerDoc() map[string]string { + return map_GitHubIdentityProvider +} + +var map_GitLabIdentityProvider = map[string]string{ + "": "GitLabIdentityProvider provides identities for users authenticating using GitLab credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "ca": "CA is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", + "url": "URL is the oauth server base URL", + "clientID": "ClientID is the oauth client ID", + "clientSecret": "ClientSecret is the oauth client secret", + "legacy": "Legacy determines if OAuth2 or OIDC should be used If true, OAuth2 is used If false, OIDC is used If nil and the URL's host is gitlab.com, OIDC is used Otherwise, OAuth2 is used In a future release, nil will default to using OIDC Eventually this flag will be removed and only OIDC will be used", +} + +func (GitLabIdentityProvider) SwaggerDoc() map[string]string { + return map_GitLabIdentityProvider +} + +var map_GoogleIdentityProvider = map[string]string{ + "": "GoogleIdentityProvider provides identities for users authenticating using Google credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "clientID": "ClientID is the oauth client ID", + "clientSecret": "ClientSecret is the oauth client secret", + "hostedDomain": "HostedDomain is the optional Google App domain (e.g. \"mycompany.com\") to restrict logins to", +} + +func (GoogleIdentityProvider) SwaggerDoc() map[string]string { + return map_GoogleIdentityProvider +} + +var map_GrantConfig = map[string]string{ + "": "GrantConfig holds the necessary configuration options for grant handlers", + "method": "Method determines the default strategy to use when an OAuth client requests a grant. This method will be used only if the specific OAuth client doesn't provide a strategy of their own. Valid grant handling methods are:\n - auto: always approves grant requests, useful for trusted clients\n - prompt: prompts the end user for approval of grant requests, useful for third-party clients\n - deny: always denies grant requests, useful for black-listed clients", + "serviceAccountMethod": "ServiceAccountMethod is used for determining client authorization for service account oauth client. It must be either: deny, prompt", +} + +func (GrantConfig) SwaggerDoc() map[string]string { + return map_GrantConfig +} + +var map_GroupResource = map[string]string{ + "": "GroupResource points to a resource by its name and API group.", + "group": "Group is the name of an API group", + "resource": "Resource is the name of a resource.", +} + +func (GroupResource) SwaggerDoc() map[string]string { + return map_GroupResource +} + +var map_HTPasswdPasswordIdentityProvider = map[string]string{ + "": "HTPasswdPasswordIdentityProvider provides identities for users authenticating using htpasswd credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. 
These capabilities should not be used by applications needing long term support.", + "file": "File is a reference to your htpasswd file", +} + +func (HTPasswdPasswordIdentityProvider) SwaggerDoc() map[string]string { + return map_HTPasswdPasswordIdentityProvider +} + +var map_HTTPServingInfo = map[string]string{ + "": "HTTPServingInfo holds configuration for serving HTTP", + "maxRequestsInFlight": "MaxRequestsInFlight is the number of concurrent requests allowed to the server. If zero, no limit.", + "requestTimeoutSeconds": "RequestTimeoutSeconds is the number of seconds before requests are timed out. The default is 60 minutes, if -1 there is no limit on requests.", +} + +func (HTTPServingInfo) SwaggerDoc() map[string]string { + return map_HTTPServingInfo +} + +var map_IdentityProvider = map[string]string{ + "": "IdentityProvider provides identities for users authenticating using credentials", + "name": "Name is used to qualify the identities returned by this provider", + "challenge": "UseAsChallenger indicates whether to issue WWW-Authenticate challenges for this provider", + "login": "UseAsLogin indicates whether to use this identity provider for unauthenticated browsers to login against", + "mappingMethod": "MappingMethod determines how identities from this provider are mapped to users", + "provider": "Provider contains the information about how to set up a specific identity provider", +} + +func (IdentityProvider) SwaggerDoc() map[string]string { + return map_IdentityProvider +} + +var map_ImageConfig = map[string]string{ + "": "ImageConfig holds the necessary configuration options for building image names for system components", + "format": "Format is the format of the name to be built for the system component", + "latest": "Latest determines if the latest tag will be pulled from the registry", +} + +func (ImageConfig) SwaggerDoc() map[string]string { + return map_ImageConfig +} + +var map_ImagePolicyConfig = map[string]string{ + "": "ImagePolicyConfig holds the necessary configuration options for limits and behavior for importing images", + "maxImagesBulkImportedPerRepository": "MaxImagesBulkImportedPerRepository controls the number of images that are imported when a user does a bulk import of a container repository. This number defaults to 50 to prevent users from importing large numbers of images accidentally. Set -1 for no limit.", + "disableScheduledImport": "DisableScheduledImport allows scheduled background import of images to be disabled.", + "scheduledImageImportMinimumIntervalSeconds": "ScheduledImageImportMinimumIntervalSeconds is the minimum number of seconds that can elapse between when image streams scheduled for background import are checked against the upstream repository. The default value is 15 minutes.", + "maxScheduledImageImportsPerMinute": "MaxScheduledImageImportsPerMinute is the maximum number of scheduled image streams that will be imported in the background per minute. The default value is 60. Set to -1 for unlimited.", + "allowedRegistriesForImport": "AllowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. 
Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.", + "internalRegistryHostname": "InternalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this setting overrides the environment variable.", + "externalRegistryHostname": "ExternalRegistryHostname sets the hostname for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.", + "additionalTrustedCA": "AdditionalTrustedCA is a path to a pem bundle file containing additional CAs that should be trusted during imagestream import.", +} + +func (ImagePolicyConfig) SwaggerDoc() map[string]string { + return map_ImagePolicyConfig +} + +var map_JenkinsPipelineConfig = map[string]string{ + "": "JenkinsPipelineConfig holds configuration for the Jenkins pipeline strategy", + "autoProvisionEnabled": "AutoProvisionEnabled determines whether a Jenkins server will be spawned from the provided template when the first build config in the project with type JenkinsPipeline is created. When not specified this option defaults to true.", + "templateNamespace": "TemplateNamespace contains the namespace name where the Jenkins template is stored", + "templateName": "TemplateName is the name of the default Jenkins template", + "serviceName": "ServiceName is the name of the Jenkins service OpenShift uses to detect whether a Jenkins pipeline handler has already been installed in a project. This value *must* match a service name in the provided template.", + "parameters": "Parameters specifies a set of optional parameters to the Jenkins template.", +} + +func (JenkinsPipelineConfig) SwaggerDoc() map[string]string { + return map_JenkinsPipelineConfig +} + +var map_KeystonePasswordIdentityProvider = map[string]string{ + "": "KeystonePasswordIdentityProvider provides identities for users authenticating using keystone password credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. 
These capabilities should not be used by applications needing long term support.", + "domainName": "Domain Name is required for keystone v3", + "useKeystoneIdentity": "UseKeystoneIdentity flag indicates that user should be authenticated by keystone ID, not by username", +} + +func (KeystonePasswordIdentityProvider) SwaggerDoc() map[string]string { + return map_KeystonePasswordIdentityProvider +} + +var map_KubeletConnectionInfo = map[string]string{ + "": "KubeletConnectionInfo holds information necessary for connecting to a kubelet", + "port": "Port is the port to connect to kubelets on", + "ca": "CA is the CA for verifying TLS connections to kubelets", +} + +func (KubeletConnectionInfo) SwaggerDoc() map[string]string { + return map_KubeletConnectionInfo +} + +var map_KubernetesMasterConfig = map[string]string{ + "": "KubernetesMasterConfig holds the necessary configuration options for the Kubernetes master", + "apiLevels": "APILevels is a list of API levels that should be enabled on startup: v1 as examples", + "disabledAPIGroupVersions": "DisabledAPIGroupVersions is a map of groups to the versions (or *) that should be disabled.", + "masterIP": "MasterIP is the public IP address of kubernetes stuff. If empty, the first result from net.InterfaceAddrs will be used.", + "masterEndpointReconcileTTL": "MasterEndpointReconcileTTL sets the time to live in seconds of an endpoint record recorded by each master. The endpoints are checked at an interval that is 2/3 of this value and this value defaults to 15s if unset. In very large clusters, this value may be increased to reduce the possibility that the master endpoint record expires (due to other load on the etcd server) and causes masters to drop in and out of the kubernetes service record. It is not recommended to set this value below 15s.", + "servicesSubnet": "ServicesSubnet is the subnet to use for assigning service IPs", + "servicesNodePortRange": "ServicesNodePortRange is the range to use for assigning service public ports on a host.", + "schedulerConfigFile": "SchedulerConfigFile points to a file that describes how to set up the scheduler. If empty, you get the default scheduling rules.", + "podEvictionTimeout": "PodEvictionTimeout controls the grace period for deleting pods on failed nodes. It takes a valid time duration string. If empty, you get the default pod eviction timeout.", + "proxyClientInfo": "ProxyClientInfo specifies the client cert/key to use when proxying to pods", + "apiServerArguments": "APIServerArguments are key value pairs that will be passed directly to the Kube apiserver that match the apiserver's command line arguments. These are not migrated, but if you reference a value that does not exist the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.", + "controllerArguments": "ControllerArguments are key value pairs that will be passed directly to the Kube controller manager that match the controller manager's command line arguments. These are not migrated, but if you reference a value that does not exist the server will not start. These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.", + "schedulerArguments": "SchedulerArguments are key value pairs that will be passed directly to the Kube scheduler that match the scheduler's command line arguments. These are not migrated, but if you reference a value that does not exist the server will not start. 
These values may override other settings in KubernetesMasterConfig which may cause invalid configurations.", +} + +func (KubernetesMasterConfig) SwaggerDoc() map[string]string { + return map_KubernetesMasterConfig +} + +var map_LDAPAttributeMapping = map[string]string{ + "": "LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields", + "id": "ID is the list of attributes whose values should be used as the user ID. Required. LDAP standard identity attribute is \"dn\"", + "preferredUsername": "PreferredUsername is the list of attributes whose values should be used as the preferred username. LDAP standard login attribute is \"uid\"", + "name": "Name is the list of attributes whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity LDAP standard display name attribute is \"cn\"", + "email": "Email is the list of attributes whose values should be used as the email address. Optional. If unspecified, no email is set for the identity", +} + +func (LDAPAttributeMapping) SwaggerDoc() map[string]string { + return map_LDAPAttributeMapping +} + +var map_LDAPPasswordIdentityProvider = map[string]string{ + "": "LDAPPasswordIdentityProvider provides identities for users authenticating using LDAP credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "url": "URL is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is\n ldap://host:port/basedn?attribute?scope?filter", + "bindDN": "BindDN is an optional DN to bind with during the search phase.", + "bindPassword": "BindPassword is an optional password to bind with during the search phase.", + "insecure": "Insecure, if true, indicates the connection should not use TLS. Cannot be set to true with a URL scheme of \"ldaps://\" If false, \"ldaps://\" URLs connect using TLS, and \"ldap://\" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830", + "ca": "CA is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", + "attributes": "Attributes maps LDAP attributes to identities", +} + +func (LDAPPasswordIdentityProvider) SwaggerDoc() map[string]string { + return map_LDAPPasswordIdentityProvider +} + +var map_LDAPQuery = map[string]string{ + "": "LDAPQuery holds the options necessary to build an LDAP query", + "baseDN": "The DN of the branch of the directory where all searches should start from", + "scope": "The (optional) scope of the search. Can be: base: only the base object, one: all objects on the base level, sub: the entire subtree Defaults to the entire subtree if not set", + "derefAliases": "The (optional) behavior of the search with regard to aliases. Can be: never: never dereference aliases, search: only dereference in searching, base: only dereference in finding the base object, always: always dereference Defaults to always dereferencing if not set", + "timeout": "TimeLimit holds the limit of time in seconds that any request to the server can remain outstanding before the wait for a response is given up. If this is 0, no client-side limit is imposed", + "filter": "Filter is a valid LDAP search filter that retrieves all relevant entries from the LDAP server with the base DN", + "pageSize": "PageSize is the maximum preferred page size, measured in LDAP entries. 
A page size of 0 means no paging will be done.", +} + +func (LDAPQuery) SwaggerDoc() map[string]string { + return map_LDAPQuery +} + +var map_LDAPSyncConfig = map[string]string{ + "": "LDAPSyncConfig holds the necessary configuration options to define an LDAP group sync\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "url": "Host is the scheme, host and port of the LDAP server to connect to: scheme://host:port", + "bindDN": "BindDN is an optional DN to bind to the LDAP server with", + "bindPassword": "BindPassword is an optional password to bind with during the search phase.", + "insecure": "Insecure, if true, indicates the connection should not use TLS. Cannot be set to true with a URL scheme of \"ldaps://\" If false, \"ldaps://\" URLs connect using TLS, and \"ldap://\" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830", + "ca": "CA is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", + "groupUIDNameMapping": "LDAPGroupUIDToOpenShiftGroupNameMapping is an optional direct mapping of LDAP group UIDs to OpenShift Group names", + "rfc2307": "RFC2307Config holds the configuration for extracting data from an LDAP server set up in a fashion similar to RFC2307: first-class group and user entries, with group membership determined by a multi-valued attribute on the group entry listing its members", + "activeDirectory": "ActiveDirectoryConfig holds the configuration for extracting data from an LDAP server set up in a fashion similar to that used in Active Directory: first-class user entries, with group membership determined by a multi-valued attribute on members listing groups they are a member of", + "augmentedActiveDirectory": "AugmentedActiveDirectoryConfig holds the configuration for extracting data from an LDAP server set up in a fashion similar to that used in Active Directory as described above, with one addition: first-class group entries exist and are used to hold metadata but not group membership", +} + +func (LDAPSyncConfig) SwaggerDoc() map[string]string { + return map_LDAPSyncConfig +} + +var map_LocalQuota = map[string]string{ + "": "LocalQuota contains options for controlling local volume quota on the node.", + "perFSGroup": "FSGroup can be specified to enable a quota on local storage use per unique FSGroup ID. At present this is only implemented for emptyDir volumes, and if the underlying volumeDirectory is on an XFS filesystem.", +} + +func (LocalQuota) SwaggerDoc() map[string]string { + return map_LocalQuota +} + +var map_MasterAuthConfig = map[string]string{ + "": "MasterAuthConfig configures authentication options in addition to the standard oauth token and client certificate authenticators", + "requestHeader": "RequestHeader holds options for setting up a front proxy against the API. It is optional.", + "webhookTokenAuthenticators": "WebhookTokenAuthnConfig, if present configures remote token reviewers", + "oauthMetadataFile": "OAuthMetadataFile is a path to a file containing the discovery endpoint for OAuth 2.0 Authorization Server Metadata for an external OAuth server. 
See IETF Draft: // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 This option is mutually exclusive with OAuthConfig", +} + +func (MasterAuthConfig) SwaggerDoc() map[string]string { + return map_MasterAuthConfig +} + +var map_MasterClients = map[string]string{ + "": "MasterClients holds references to `.kubeconfig` files that qualify master clients for OpenShift and Kubernetes", + "openshiftLoopbackKubeConfig": "OpenShiftLoopbackKubeConfig is a .kubeconfig filename for system components to loopback to this master", + "openshiftLoopbackClientConnectionOverrides": "OpenShiftLoopbackClientConnectionOverrides specifies client overrides for system components to loop back to this master.", +} + +func (MasterClients) SwaggerDoc() map[string]string { + return map_MasterClients +} + +var map_MasterConfig = map[string]string{ + "": "MasterConfig holds the necessary configuration options for the OpenShift master\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "servingInfo": "ServingInfo describes how to start serving", + "authConfig": "AuthConfig configures authentication options in addition to the standard oauth token and client certificate authenticators", + "aggregatorConfig": "AggregatorConfig has options for configuring the aggregator component of the API server.", + "corsAllowedOrigins": "CORSAllowedOrigins", + "apiLevels": "APILevels is a list of API levels that should be enabled on startup: v1 as examples", + "masterPublicURL": "MasterPublicURL is how clients can access the OpenShift API server", + "controllers": "Controllers is a list of the controllers that should be started. If set to \"none\", no controllers will start automatically. The default value is \"*\" which will start all controllers. When using \"*\", you may exclude controllers by prepending a \"-\" in front of their name. No other values are recognized at this time.", + "admissionConfig": "AdmissionConfig contains admission control plugin configuration.", + "controllerConfig": "ControllerConfig holds configuration values for controllers", + "etcdStorageConfig": "EtcdStorageConfig contains information about how API resources are stored in Etcd. 
These values are only relevant when etcd is the backing store for the cluster.", + "etcdClientInfo": "EtcdClientInfo contains information about how to connect to etcd", + "kubeletClientInfo": "KubeletClientInfo contains information about how to connect to kubelets", + "kubernetesMasterConfig": "KubernetesMasterConfig, if present start the kubernetes master in this process", + "etcdConfig": "EtcdConfig, if present start etcd in this process", + "oauthConfig": "OAuthConfig, if present start the /oauth endpoint in this process", + "dnsConfig": "DNSConfig, if present start the DNS server in this process", + "serviceAccountConfig": "ServiceAccountConfig holds options related to service accounts", + "masterClients": "MasterClients holds all the client connection information for controllers and other system components", + "imageConfig": "ImageConfig holds options that describe how to build image names for system components", + "imagePolicyConfig": "ImagePolicyConfig controls limits and behavior for importing images", + "policyConfig": "PolicyConfig holds information about where to locate critical pieces of bootstrapping policy", + "projectConfig": "ProjectConfig holds information about project creation and defaults", + "routingConfig": "RoutingConfig holds information about routing and route generation", + "networkConfig": "NetworkConfig to be passed to the compiled-in network plugin", + "volumeConfig": "MasterVolumeConfig contains options for configuring volume plugins in the master node.", + "jenkinsPipelineConfig": "JenkinsPipelineConfig holds information about the default Jenkins template used for JenkinsPipeline build strategy.", + "auditConfig": "AuditConfig holds information related to auditing capabilities.", +} + +func (MasterConfig) SwaggerDoc() map[string]string { + return map_MasterConfig +} + +var map_MasterNetworkConfig = map[string]string{ + "": "MasterNetworkConfig to be passed to the compiled-in network plugin", + "networkPluginName": "NetworkPluginName is the name of the network plugin to use", + "clusterNetworkCIDR": "ClusterNetworkCIDR is the CIDR string to specify the global overlay network's L3 space. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead.", + "clusterNetworks": "ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDRs and netmasks that the SDN can allocate addresses from. If this is specified, then ClusterNetworkCIDR and HostSubnetLength may not be set.", + "hostSubnetLength": "HostSubnetLength is the number of bits to allocate to each host's subnet e.g. 8 would mean a /24 network on the host. Deprecated, but maintained for backwards compatibility, use ClusterNetworks instead.", + "serviceNetworkCIDR": "ServiceNetwork is the CIDR string to specify the service networks", + "externalIPNetworkCIDRs": "ExternalIPNetworkCIDRs controls what values are acceptable for the service external IP field. If empty, no externalIP may be set. It may contain a list of CIDRs which are checked for access. If a CIDR is prefixed with !, IPs in that CIDR will be rejected. Rejections will be applied first, then the IP checked against one of the allowed CIDRs. You should ensure this range does not overlap with your nodes, pods, or service CIDRs for security reasons.", + "ingressIPNetworkCIDR": "IngressIPNetworkCIDR controls the range to assign ingress ips from for services of type LoadBalancer on bare metal. If empty, ingress ips will not be assigned. 
It may contain a single CIDR that will be allocated from. For security reasons, you should ensure that this range does not overlap with the CIDRs reserved for external ips, nodes, pods, or services.", + "vxlanPort": "VXLANPort is the VXLAN port used by the cluster defaults. If it is not set, 4789 is the default value", +} + +func (MasterNetworkConfig) SwaggerDoc() map[string]string { + return map_MasterNetworkConfig +} + +var map_MasterVolumeConfig = map[string]string{ + "": "MasterVolumeConfig contains options for configuring volume plugins in the master node.", + "dynamicProvisioningEnabled": "DynamicProvisioningEnabled is a boolean that toggles dynamic provisioning off when false, defaults to true", +} + +func (MasterVolumeConfig) SwaggerDoc() map[string]string { + return map_MasterVolumeConfig +} + +var map_NamedCertificate = map[string]string{ + "": "NamedCertificate specifies a certificate/key, and the names it should be served for", + "names": "Names is a list of DNS names this certificate should be used to secure A name can be a normal DNS name, or can contain leading wildcard segments.", +} + +func (NamedCertificate) SwaggerDoc() map[string]string { + return map_NamedCertificate +} + +var map_NodeAuthConfig = map[string]string{ + "": "NodeAuthConfig holds authn/authz configuration options", + "authenticationCacheTTL": "AuthenticationCacheTTL indicates how long an authentication result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get the default timeout. If zero (e.g. \"0m\"), caching is disabled", + "authenticationCacheSize": "AuthenticationCacheSize indicates how many authentication results should be cached. If 0, the default cache size is used.", + "authorizationCacheTTL": "AuthorizationCacheTTL indicates how long an authorization result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get the default timeout. If zero (e.g. \"0m\"), caching is disabled", + "authorizationCacheSize": "AuthorizationCacheSize indicates how many authorization results should be cached. If 0, the default cache size is used.", +} + +func (NodeAuthConfig) SwaggerDoc() map[string]string { + return map_NodeAuthConfig +} + +var map_NodeConfig = map[string]string{ + "": "NodeConfig is the fully specified config starting an OpenShift node\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "nodeName": "NodeName is the value used to identify this particular node in the cluster. If possible, this should be your fully qualified hostname. If you're describing a set of static nodes to the master, this value must match one of the values in the list", + "nodeIP": "Node may have multiple IPs, specify the IP to use for pod traffic routing If not specified, network parse/lookup on the nodeName is performed and the first non-loopback address is used", + "servingInfo": "ServingInfo describes how to start serving", + "masterKubeConfig": "MasterKubeConfig is a filename for the .kubeconfig file that describes how to connect this node to the master", + "masterClientConnectionOverrides": "MasterClientConnectionOverrides provides overrides to the client connection used to connect to the master.", + "dnsDomain": "DNSDomain holds the domain suffix that will be used for the DNS search path inside each container. Defaults to 'cluster.local'.", + "dnsIP": "DNSIP is the IP address that pods will use to access cluster DNS. 
Defaults to the service IP of the Kubernetes master. This IP must be listening on port 53 for compatibility with libc resolvers (which cannot be configured to resolve names from any other port). When running more complex local DNS configurations, this is often set to the local address of a DNS proxy like dnsmasq, which then will consult either the local DNS (see dnsBindAddress) or the master DNS.", + "dnsBindAddress": "DNSBindAddress is the ip:port to serve DNS on. If this is not set, the DNS server will not be started. Because most DNS resolvers will only listen on port 53, if you select an alternative port you will need a DNS proxy like dnsmasq to answer queries for containers. A common configuration is dnsmasq configured on a node IP listening on 53 and delegating queries for dnsDomain to this process, while sending other queries to the host environment's nameservers.", + "dnsNameservers": "DNSNameservers is a list of ip:port values of recursive nameservers to forward queries to when running a local DNS server if dnsBindAddress is set. If this value is empty, the DNS server will default to the nameservers listed in /etc/resolv.conf. If you have configured dnsmasq or another DNS proxy on the system, this value should be set to the upstream nameservers dnsmasq resolves with.", + "dnsRecursiveResolvConf": "DNSRecursiveResolvConf is a path to a resolv.conf file that contains settings for an upstream server. Only the nameservers and port fields are used. The file must exist and parse correctly. It adds extra nameservers to DNSNameservers if set.", + "networkPluginName": "Deprecated and maintained for backward compatibility, use NetworkConfig.NetworkPluginName instead", + "networkConfig": "NetworkConfig provides network options for the node", + "volumeDirectory": "VolumeDirectory is the directory that volumes will be stored under", + "imageConfig": "ImageConfig holds options that describe how to build image names for system components", + "allowDisabledDocker": "AllowDisabledDocker if true, the Kubelet will ignore errors from Docker. This means that a node can start on a machine that doesn't have docker started.", + "podManifestConfig": "PodManifestConfig holds the configuration for enabling the Kubelet to create pods based on manifest file(s) placed locally on the node", + "authConfig": "AuthConfig holds authn/authz configuration options", + "dockerConfig": "DockerConfig holds Docker related configuration options.", + "kubeletArguments": "KubeletArguments are key value pairs that will be passed directly to the Kubelet that match the Kubelet's command line arguments. These are not migrated or validated, so if you use them they may become invalid. These values override other settings in NodeConfig which may cause invalid configurations.", + "proxyArguments": "ProxyArguments are key value pairs that will be passed directly to the Proxy that match the Proxy's command line arguments. These are not migrated or validated, so if you use them they may become invalid. 
These values override other settings in NodeConfig which may cause invalid configurations.", + "iptablesSyncPeriod": "IPTablesSyncPeriod is how often iptables rules are refreshed", + "enableUnidling": "EnableUnidling controls whether or not the hybrid unidling proxy will be set up", + "volumeConfig": "VolumeConfig contains options for configuring volumes on the node.", +} + +func (NodeConfig) SwaggerDoc() map[string]string { + return map_NodeConfig +} + +var map_NodeNetworkConfig = map[string]string{ + "": "NodeNetworkConfig provides network options for the node", + "networkPluginName": "NetworkPluginName is a string specifying the networking plugin", + "mtu": "Maximum transmission unit for the network packets", +} + +func (NodeNetworkConfig) SwaggerDoc() map[string]string { + return map_NodeNetworkConfig +} + +var map_NodeVolumeConfig = map[string]string{ + "": "NodeVolumeConfig contains options for configuring volumes on the node.", + "localQuota": "LocalQuota contains options for controlling local volume quota on the node.", +} + +func (NodeVolumeConfig) SwaggerDoc() map[string]string { + return map_NodeVolumeConfig +} + +var map_OAuthConfig = map[string]string{ + "": "OAuthConfig holds the necessary configuration options for OAuth authentication", + "masterCA": "MasterCA is the CA for verifying the TLS connection back to the MasterURL.", + "masterURL": "MasterURL is used for making server-to-server calls to exchange authorization codes for access tokens", + "masterPublicURL": "MasterPublicURL is used for building valid client redirect URLs for internal and external access", + "assetPublicURL": "AssetPublicURL is used for building valid client redirect URLs for external access", + "alwaysShowProviderSelection": "AlwaysShowProviderSelection will force the provider selection page to render even when there is only a single provider.", + "identityProviders": "IdentityProviders is an ordered list of ways for a user to identify themselves", + "grantConfig": "GrantConfig describes how to handle grants", + "sessionConfig": "SessionConfig holds information about configuring sessions.", + "tokenConfig": "TokenConfig contains options for authorization and access tokens", + "templates": "Templates allow you to customize pages like the login page.", +} + +func (OAuthConfig) SwaggerDoc() map[string]string { + return map_OAuthConfig +} + +var map_OAuthTemplates = map[string]string{ + "": "OAuthTemplates allow for customization of pages like the login page", + "login": "Login is a path to a file containing a go template used to render the login page. If unspecified, the default login page is used.", + "providerSelection": "ProviderSelection is a path to a file containing a go template used to render the provider selection page. If unspecified, the default provider selection page is used.", + "error": "Error is a path to a file containing a go template used to render error pages during the authentication or grant flow If unspecified, the default error page is used.", +} + +func (OAuthTemplates) SwaggerDoc() map[string]string { + return map_OAuthTemplates +} + +var map_OpenIDClaims = map[string]string{ + "": "OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider", + "id": "ID is the list of claims whose values should be used as the user ID. Required. OpenID standard identity claim is \"sub\"", + "preferredUsername": "PreferredUsername is the list of claims whose values should be used as the preferred username. 
If unspecified, the preferred username is determined from the value of the id claim", + "name": "Name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity", + "email": "Email is the list of claims whose values should be used as the email address. Optional. If unspecified, no email is set for the identity", +} + +func (OpenIDClaims) SwaggerDoc() map[string]string { + return map_OpenIDClaims +} + +var map_OpenIDIdentityProvider = map[string]string{ + "": "OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "ca": "CA is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", + "clientID": "ClientID is the oauth client ID", + "clientSecret": "ClientSecret is the oauth client secret", + "extraScopes": "ExtraScopes are any scopes to request in addition to the standard \"openid\" scope.", + "extraAuthorizeParameters": "ExtraAuthorizeParameters are any custom parameters to add to the authorize request.", + "urls": "URLs to use to authenticate", + "claims": "Claims mappings", +} + +func (OpenIDIdentityProvider) SwaggerDoc() map[string]string { + return map_OpenIDIdentityProvider +} + +var map_OpenIDURLs = map[string]string{ + "": "OpenIDURLs are URLs to use when authenticating with an OpenID identity provider", + "authorize": "Authorize is the oauth authorization URL", + "token": "Token is the oauth token granting URL", + "userInfo": "UserInfo is the optional userinfo URL. If present, a granted access_token is used to request claims If empty, a granted id_token is parsed for claims", +} + +func (OpenIDURLs) SwaggerDoc() map[string]string { + return map_OpenIDURLs +} + +var map_PodManifestConfig = map[string]string{ + "": "PodManifestConfig holds the necessary configuration options for using pod manifests", + "path": "Path specifies the path for the pod manifest file or directory. If it is a directory, it is expected to contain one or more manifest files. This is used by the Kubelet to create pods on the node", + "fileCheckIntervalSeconds": "FileCheckIntervalSeconds is the interval in seconds for checking the manifest file(s) for new data. The interval needs to be a positive value", +} + +func (PodManifestConfig) SwaggerDoc() map[string]string { + return map_PodManifestConfig +} + +var map_PolicyConfig = map[string]string{ + "": "\n holds the necessary configuration options for", + "userAgentMatchingConfig": "UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!", +} + +func (PolicyConfig) SwaggerDoc() map[string]string { + return map_PolicyConfig +} + +var map_ProjectConfig = map[string]string{ + "": "\n holds the necessary configuration options for", + "defaultNodeSelector": "DefaultNodeSelector holds default project node label selector", + "projectRequestMessage": "ProjectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint", + "projectRequestTemplate": "ProjectRequestTemplate is the template to use for creating projects in response to projectrequest. It is in the format namespace/template and it is optional. 
If it is not specified, a default template is used.", + "securityAllocator": "SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled.", +} + +func (ProjectConfig) SwaggerDoc() map[string]string { + return map_ProjectConfig +} + +var map_RFC2307Config = map[string]string{ + "": "RFC2307Config holds the necessary configuration options to define how an LDAP group sync interacts with an LDAP server using the RFC2307 schema", + "groupsQuery": "AllGroupsQuery holds the template for an LDAP query that returns group entries.", + "groupUIDAttribute": "GroupUIDAttribute defines which attribute on an LDAP group entry will be interpreted as its unique identifier. (ldapGroupUID)", + "groupNameAttributes": "GroupNameAttributes defines which attributes on an LDAP group entry will be interpreted as its name to use for an OpenShift group", + "groupMembershipAttributes": "GroupMembershipAttributes defines which attributes on an LDAP group entry will be interpreted as its members. The values contained in those attributes must be queryable by your UserUIDAttribute", + "usersQuery": "AllUsersQuery holds the template for an LDAP query that returns user entries.", + "userUIDAttribute": "UserUIDAttribute defines which attribute on an LDAP user entry will be interpreted as its unique identifier. It must correspond to values that will be found from the GroupMembershipAttributes", + "userNameAttributes": "UserNameAttributes defines which attributes on an LDAP user entry will be used, in order, as its OpenShift user name. The first attribute with a non-empty value is used. This should match your PreferredUsername setting for your LDAPPasswordIdentityProvider", + "tolerateMemberNotFoundErrors": "TolerateMemberNotFoundErrors determines the behavior of the LDAP sync job when missing user entries are encountered. If 'true', an LDAP query for users that doesn't find any will be tolerated and only an error will be logged. If 'false', the LDAP sync job will fail if a query for users doesn't find any. The default value is 'false'. Misconfigured LDAP sync jobs with this flag set to 'true' can cause group membership to be removed, so it is recommended to use this flag with caution.", + "tolerateMemberOutOfScopeErrors": "TolerateMemberOutOfScopeErrors determines the behavior of the LDAP sync job when out-of-scope user entries are encountered. If 'true', an LDAP query for a user that falls outside of the base DN given for the all user query will be tolerated and only an error will be logged. If 'false', the LDAP sync job will fail if a user query would search outside of the base DN specified by the all user query. Misconfigured LDAP sync jobs with this flag set to 'true' can result in groups missing users, so it is recommended to use this flag with caution.", +} + +func (RFC2307Config) SwaggerDoc() map[string]string { + return map_RFC2307Config +} + +var map_RegistryLocation = map[string]string{ + "": "RegistryLocation contains a location of the registry specified by the registry domain name. 
The domain name might include wildcards, like '*' or '??'.", + "domainName": "DomainName specifies a domain name for the registry. In case the registry uses a non-standard port (i.e., not 80 or 443), the port should be included in the domain name as well.", + "insecure": "Insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed to be secure.", +} + +func (RegistryLocation) SwaggerDoc() map[string]string { + return map_RegistryLocation +} + +var map_RemoteConnectionInfo = map[string]string{ + "": "RemoteConnectionInfo holds information necessary for establishing a remote connection", + "url": "URL is the remote URL to connect to", + "ca": "CA is the CA for verifying TLS connections", +} + +func (RemoteConnectionInfo) SwaggerDoc() map[string]string { + return map_RemoteConnectionInfo +} + +var map_RequestHeaderAuthenticationOptions = map[string]string{ + "": "RequestHeaderAuthenticationOptions provides options for setting up a front proxy against the entire API instead of against the /oauth endpoint.", + "clientCA": "ClientCA is a file with the trusted signer certs. It is required.", + "clientCommonNames": "ClientCommonNames is a required list of common names to require a match from.", + "usernameHeaders": "UsernameHeaders is the list of headers to check for user information. First hit wins.", + "groupHeaders": "GroupHeaders is the set of headers to check for group information. All are unioned.", + "extraHeaderPrefixes": "ExtraHeaderPrefixes is the set of request header prefixes to inspect for user extra. X-Remote-Extra- is suggested.", +} + +func (RequestHeaderAuthenticationOptions) SwaggerDoc() map[string]string { + return map_RequestHeaderAuthenticationOptions +} + +var map_RequestHeaderIdentityProvider = map[string]string{ + "": "RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "loginURL": "LoginURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}", + "challengeURL": "ChallengeURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}", + "clientCA": "ClientCA is a file with the trusted signer certs. If empty, no request verification is done, and any direct request to the OAuth server can impersonate any identity from this provider, merely by setting a request header.", + "clientCommonNames": "ClientCommonNames is an optional list of common names to require a match from. 
If empty, any client certificate validated against the clientCA bundle is considered authoritative.", + "headers": "Headers is the set of headers to check for identity information", + "preferredUsernameHeaders": "PreferredUsernameHeaders is the set of headers to check for the preferred username", + "nameHeaders": "NameHeaders is the set of headers to check for the display name", + "emailHeaders": "EmailHeaders is the set of headers to check for the email address", +} + +func (RequestHeaderIdentityProvider) SwaggerDoc() map[string]string { + return map_RequestHeaderIdentityProvider +} + +var map_RoutingConfig = map[string]string{ + "": "RoutingConfig holds the necessary configuration options for routing to subdomains", + "subdomain": "Subdomain is the suffix appended to $service.$namespace. to form the default route hostname DEPRECATED: This field is being replaced by routers setting their own defaults. This is the \"default\" route.", +} + +func (RoutingConfig) SwaggerDoc() map[string]string { + return map_RoutingConfig +} + +var map_SecurityAllocator = map[string]string{ + "": "SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled.", + "uidAllocatorRange": "UIDAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the block each namespace gets. For example, 1000-1999/10 will allocate ten UIDs per namespace, and will be able to allocate up to 100 blocks before running out of space. The default is to allocate from 1 billion to 2 billion in 10k blocks (which is the expected size of the ranges container images will use once user namespaces are started).", + "mcsAllocatorRange": "MCSAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is \"<prefix>/<numLabels>[,<maxLabels>]\". The default is \"s0/2\" and will allocate from c0 -> c1023, which means a total of 535k labels are available (1024 choose 2 ~ 535k). If this value is changed after startup, new projects may receive labels that are already allocated to other projects. Prefix may be any valid SELinux set of terms (including user, role, and type), although leaving them as the default will allow the server to set them automatically.\n\nExamples: * s0:/2 - Allocate labels from s0:c0,c0 to s0:c511,c511 * s0:/2,512 - Allocate labels from s0:c0,c0,c0 to s0:c511,c511,511", + "mcsLabelsPerProject": "MCSLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS ranges (100k namespaces, 535k/5 labels).", +} + +func (SecurityAllocator) SwaggerDoc() map[string]string { + return map_SecurityAllocator +} + +var map_ServiceAccountConfig = map[string]string{ + "": "ServiceAccountConfig holds the necessary configuration options for a service account", + "managedNames": "ManagedNames is a list of service account names that will be auto-created in every namespace. If no names are specified, the ServiceAccountsController will not be started.", + "limitSecretReferences": "LimitSecretReferences controls whether or not to allow a service account to reference any secret in a namespace without explicitly referencing them", + "privateKeyFile": "PrivateKeyFile is a file containing a PEM-encoded private RSA key, used to sign service account tokens. If no private key is specified, the service account TokensController will not be started.", + "publicKeyFiles": "PublicKeyFiles is a list of files, each containing a PEM-encoded public RSA key. 
(If any file contains a private key, the public portion of the key is used) The list of public keys is used to verify presented service account tokens. Each key is tried in order until the list is exhausted or verification succeeds. If no keys are specified, no service account authentication will be available.", + "masterCA": "MasterCA is the CA for verifying the TLS connection back to the master. The service account controller will automatically inject the contents of this file into pods so they can verify connections to the master.", +} + +func (ServiceAccountConfig) SwaggerDoc() map[string]string { + return map_ServiceAccountConfig +} + +var map_ServiceServingCert = map[string]string{ + "": "ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for pods fulfilling a service to serve with.", + "signer": "Signer holds the signing information used to automatically sign serving certificates. If this value is nil, then certs are not signed automatically.", +} + +func (ServiceServingCert) SwaggerDoc() map[string]string { + return map_ServiceServingCert +} + +var map_ServingInfo = map[string]string{ + "": "ServingInfo holds information about serving web pages", + "bindAddress": "BindAddress is the ip:port to serve on", + "bindNetwork": "BindNetwork is the type of network to bind to - defaults to \"tcp4\", accepts \"tcp\", \"tcp4\", and \"tcp6\"", + "clientCA": "ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates", + "namedCertificates": "NamedCertificates is a list of certificates to use to secure requests to specific hostnames", + "minTLSVersion": "MinTLSVersion is the minimum TLS version supported. Values must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants", + "cipherSuites": "CipherSuites contains an overridden list of ciphers for the server to support. Values must match cipher suite IDs from https://golang.org/pkg/crypto/tls/#pkg-constants", +} + +func (ServingInfo) SwaggerDoc() map[string]string { + return map_ServingInfo +} + +var map_SessionConfig = map[string]string{ + "": "SessionConfig specifies options for cookie-based sessions. Used by AuthRequestHandlerSession", + "sessionSecretsFile": "SessionSecretsFile is a reference to a file containing a serialized SessionSecrets object If no file is specified, a random signing and encryption key are generated at each server start", + "sessionMaxAgeSeconds": "SessionMaxAgeSeconds specifies how long created sessions last. Used by AuthRequestHandlerSession", + "sessionName": "SessionName is the cookie name used to store the session", +} + +func (SessionConfig) SwaggerDoc() map[string]string { + return map_SessionConfig +} + +var map_SessionSecret = map[string]string{ + "": "SessionSecret is a secret used to authenticate/decrypt cookie-based sessions", + "authentication": "Authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes.", + "encryption": "Encryption is used to encrypt sessions. Must be 16, 24, or 32 characters long, to select AES-128, AES-192, or AES-256.", +} + +func (SessionSecret) SwaggerDoc() map[string]string { + return map_SessionSecret +} + +var map_SessionSecrets = map[string]string{ + "": "SessionSecrets list the secrets to use to sign/encrypt and authenticate/decrypt created sessions.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. 
These capabilities should not be used by applications needing long term support.", + "secrets": "Secrets is a list of secrets New sessions are signed and encrypted using the first secret. Existing sessions are decrypted/authenticated by each secret until one succeeds. This allows rotating secrets.", +} + +func (SessionSecrets) SwaggerDoc() map[string]string { + return map_SessionSecrets +} + +var map_SourceStrategyDefaultsConfig = map[string]string{ + "": "SourceStrategyDefaultsConfig contains values that apply to builds using the source strategy.", + "incremental": "incremental indicates if s2i build strategies should perform an incremental build or not", +} + +func (SourceStrategyDefaultsConfig) SwaggerDoc() map[string]string { + return map_SourceStrategyDefaultsConfig +} + +var map_StringSource = map[string]string{ + "": "StringSource allows specifying a string inline, or externally via env var or file. When it contains only a string value, it marshals to a simple JSON string.", +} + +func (StringSource) SwaggerDoc() map[string]string { + return map_StringSource +} + +var map_StringSourceSpec = map[string]string{ + "": "StringSourceSpec specifies a string value, or external location", + "value": "Value specifies the cleartext value, or an encrypted value if keyFile is specified.", + "env": "Env specifies an envvar containing the cleartext value, or an encrypted value if the keyFile is specified.", + "file": "File references a file containing the cleartext value, or an encrypted value if a keyFile is specified.", + "keyFile": "KeyFile references a file containing the key to use to decrypt the value.", +} + +func (StringSourceSpec) SwaggerDoc() map[string]string { + return map_StringSourceSpec +} + +var map_TokenConfig = map[string]string{ + "": "TokenConfig holds the necessary configuration options for authorization and access tokens", + "authorizeTokenMaxAgeSeconds": "AuthorizeTokenMaxAgeSeconds defines the maximum age of authorize tokens", + "accessTokenMaxAgeSeconds": "AccessTokenMaxAgeSeconds defines the maximum age of access tokens", + "accessTokenInactivityTimeoutSeconds": "AccessTokenInactivityTimeoutSeconds defines the default token inactivity timeout for tokens granted by any client. Setting it to nil means the feature is completely disabled (default) The default setting can be overridden on a per-OAuthClient basis. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Valid values are: - 0: Tokens never time out - X: Tokens time out if there is no activity for X seconds The current minimum allowed value for X is 300 (5 minutes)", +} + +func (TokenConfig) SwaggerDoc() map[string]string { + return map_TokenConfig +} + +var map_UserAgentDenyRule = map[string]string{ + "": "UserAgentDenyRule adds a rejection message that can be used to help a user figure out how to get an approved client", + "rejectionMessage": "RejectionMessage is the message shown when rejecting a client. If it is not set, the default message is used.", +} + +func (UserAgentDenyRule) SwaggerDoc() map[string]string { + return map_UserAgentDenyRule +} + +var map_UserAgentMatchRule = map[string]string{ + "": "UserAgentMatchRule describes how to match a given request based on User-Agent and HTTPVerb", + "regex": "UserAgentRegex is a regex that is checked against the User-Agent. Known variants of oc clients 1. 
oc accessing kube resources: oc/v1.2.0 (linux/amd64) kubernetes/bc4550d 2. oc accessing openshift resources: oc/v1.1.3 (linux/amd64) openshift/b348c2f 3. openshift kubectl accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d 4. openshift kubectl accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f 5. oadm accessing kube resources: oadm/v1.2.0 (linux/amd64) kubernetes/bc4550d 6. oadm accessing openshift resources: oadm/v1.1.3 (linux/amd64) openshift/b348c2f 7. openshift cli accessing kube resources: openshift/v1.2.0 (linux/amd64) kubernetes/bc4550d 8. openshift cli accessing openshift resources: openshift/v1.1.3 (linux/amd64) openshift/b348c2f", + "httpVerbs": "HTTPVerbs specifies which HTTP verbs should be matched. An empty list means \"match all verbs\".", +} + +func (UserAgentMatchRule) SwaggerDoc() map[string]string { + return map_UserAgentMatchRule +} + +var map_UserAgentMatchingConfig = map[string]string{ + "": "UserAgentMatchingConfig controls how API calls from *voluntarily* identifying clients will be handled. THIS DOES NOT DEFEND AGAINST MALICIOUS CLIENTS!", + "requiredClients": "If this list is non-empty, then a User-Agent must match one of the UserAgentRegexes to be allowed", + "deniedClients": "If this list is non-empty, then a User-Agent must not match any of the UserAgentRegexes", + "defaultRejectionMessage": "DefaultRejectionMessage is the message shown when rejecting a client. If it is not set, a generic message is given.", +} + +func (UserAgentMatchingConfig) SwaggerDoc() map[string]string { + return map_UserAgentMatchingConfig +} + +var map_WebhookTokenAuthenticator = map[string]string{ + "": "WebhookTokenAuthenticators holds the necessary configuration options for external token authenticators", + "configFile": "ConfigFile is a path to a Kubeconfig file with the webhook configuration", + "cacheTTL": "CacheTTL indicates how long an authentication result should be cached. It takes a valid time duration string (e.g. \"5m\"). If empty, you get a default timeout of 2 minutes. If zero (e.g. 
\"0m\"), caching is disabled", +} + +func (WebhookTokenAuthenticator) SwaggerDoc() map[string]string { + return map_WebhookTokenAuthenticator +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/machine/.codegen.yaml b/vendor/github.com/openshift/api/machine/.codegen.yaml new file mode 100644 index 000000000..ffa2c8d9b --- /dev/null +++ b/vendor/github.com/openshift/api/machine/.codegen.yaml @@ -0,0 +1,2 @@ +swaggerdocs: + commentPolicy: Warn diff --git a/vendor/github.com/openshift/api/machine/OWNERS b/vendor/github.com/openshift/api/machine/OWNERS new file mode 100644 index 000000000..53e482c75 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/OWNERS @@ -0,0 +1,4 @@ +reviewers: + - JoelSpeed + - alexander-demichev + - mandre diff --git a/vendor/github.com/openshift/api/machine/install.go b/vendor/github.com/openshift/api/machine/install.go new file mode 100644 index 000000000..68df57704 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/install.go @@ -0,0 +1,32 @@ +package machine + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + machinev1 "github.com/openshift/api/machine/v1" + machinev1alpha1 "github.com/openshift/api/machine/v1alpha1" + machinev1beta1 "github.com/openshift/api/machine/v1beta1" +) + +const ( + GroupName = "machine.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder( + machinev1beta1.Install, + machinev1.Install, + machinev1alpha1.Install, + ) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/machine/v1/0000_10_controlplanemachineset.crd.yaml b/vendor/github.com/openshift/api/machine/v1/0000_10_controlplanemachineset.crd.yaml new file mode 100644 index 000000000..699621ec4 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1/0000_10_controlplanemachineset.crd.yaml @@ -0,0 +1,564 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + capability.openshift.io/name: MachineAPI + api-approved.openshift.io: https://github.com/openshift/api/pull/1112 + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/self-managed-high-availability: "true" + creationTimestamp: null + name: controlplanemachinesets.machine.openshift.io +spec: + group: machine.openshift.io + names: + kind: ControlPlaneMachineSet + listKind: ControlPlaneMachineSetList + plural: controlplanemachinesets + singular: controlplanemachineset + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Desired Replicas + jsonPath: .spec.replicas + name: Desired + type: integer + - description: Current Replicas + jsonPath: .status.replicas + name: Current + type: integer + - description: Ready Replicas + jsonPath: .status.readyReplicas + name: Ready + type: integer + - description: Updated Replicas + jsonPath: .status.updatedReplicas + name: Updated + type: integer + - description: Observed number of unavailable replicas + jsonPath: .status.unavailableReplicas + name: Unavailable + type: integer + - description: ControlPlaneMachineSet state + jsonPath: .spec.state + name: State + type: string + - description: ControlPlaneMachineSet age + jsonPath: 
.metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: 'ControlPlaneMachineSet ensures that a specified number of control plane machine replicas are running at any given time. Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).' + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ControlPlaneMachineSet represents the configuration of the ControlPlaneMachineSet. + type: object + required: + - replicas + - selector + - template + properties: + replicas: + description: Replicas defines how many Control Plane Machines should be created by this ControlPlaneMachineSet. This field is immutable and cannot be changed after cluster installation. The ControlPlaneMachineSet only operates with 3 or 5 node control planes, 3 and 5 are the only valid values for this field. + type: integer + format: int32 + default: 3 + enum: + - 3 + - 5 + x-kubernetes-validations: + - rule: self == oldSelf + message: replicas is immutable + selector: + description: Label selector for Machines. Existing Machines selected by this selector will be the ones affected by this ControlPlaneMachineSet. It must match the template's labels. This field is considered immutable after creation of the resource. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + x-kubernetes-validations: + - rule: self == oldSelf + message: selector is immutable + state: + description: State defines whether the ControlPlaneMachineSet is Active or Inactive. 
When Inactive, the ControlPlaneMachineSet will not take any action on the state of the Machines within the cluster. When Active, the ControlPlaneMachineSet will reconcile the Machines and will update the Machines as necessary. Once Active, a ControlPlaneMachineSet cannot be made Inactive. To prevent further action please remove the ControlPlaneMachineSet. + type: string + default: Inactive + enum: + - Active + - Inactive + x-kubernetes-validations: + - rule: oldSelf != 'Active' || self == oldSelf + message: state cannot be changed once Active + strategy: + description: Strategy defines how the ControlPlaneMachineSet will update Machines when it detects a change to the ProviderSpec. + type: object + default: + type: RollingUpdate + properties: + type: + description: Type defines the type of update strategy that should be used when updating Machines owned by the ControlPlaneMachineSet. Valid values are "RollingUpdate" and "OnDelete". The current default value is "RollingUpdate". + type: string + default: RollingUpdate + enum: + - RollingUpdate + - OnDelete + template: + description: Template describes the Control Plane Machines that will be created by this ControlPlaneMachineSet. + type: object + required: + - machineType + properties: + machineType: + description: MachineType determines the type of Machines that should be managed by the ControlPlaneMachineSet. Currently, the only valid value is machines_v1beta1_machine_openshift_io. + type: string + enum: + - machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + description: OpenShiftMachineV1Beta1Machine defines the template for creating Machines from the v1beta1.machine.openshift.io API group. + type: object + required: + - metadata + - spec + properties: + failureDomains: + description: FailureDomains is the list of failure domains (sometimes called availability zones) in which the ControlPlaneMachineSet should balance the Control Plane Machines. This will be merged into the ProviderSpec given in the template. This field is optional on platforms that do not require placement information. + type: object + required: + - platform + properties: + aws: + description: AWS configures failure domain information for the AWS platform. + type: array + items: + description: AWSFailureDomain configures failure domain information for the AWS platform. + type: object + minProperties: 1 + properties: + placement: + description: Placement configures the placement information for this instance. + type: object + required: + - availabilityZone + properties: + availabilityZone: + description: AvailabilityZone is the availability zone of the instance. + type: string + subnet: + description: Subnet is a reference to the subnet to use for this instance. + type: object + required: + - type + properties: + arn: + description: ARN of resource. + type: string + filters: + description: Filters is a set of filters used to identify a resource. + type: array + items: + description: AWSResourceFilter is a filter used to identify an AWS resource + type: object + required: + - name + properties: + name: + description: Name of the filter. Filter names are case-sensitive. + type: string + values: + description: Values includes one or more filter values. Filter values are case-sensitive. + type: array + items: + type: string + id: + description: ID of resource. + type: string + type: + description: Type determines how the reference will fetch the AWS resource. 
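# Editor's note: a minimal sketch of a single AWS failure domain entry as the
# schema above describes it. The zone and subnet values ("us-east-1a",
# "subnet-0abc123") are hypothetical; the subnet "type" (enumerated just below)
# selects which of id, arn, or filters must be populated, as enforced by the
# CEL rules that follow.
#
#   failureDomains:
#     platform: AWS
#     aws:
#     - placement:
#         availabilityZone: us-east-1a
#       subnet:
#         type: ID
#         id: subnet-0abc123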
+ type: string + enum: + - ID + - ARN + - Filters + x-kubernetes-validations: + - rule: 'has(self.type) && self.type == ''ID'' ? has(self.id) : !has(self.id)' + message: id is required when type is ID, and forbidden otherwise + - rule: 'has(self.type) && self.type == ''ARN'' ? has(self.arn) : !has(self.arn)' + message: arn is required when type is ARN, and forbidden otherwise + - rule: 'has(self.type) && self.type == ''Filters'' ? has(self.filters) : !has(self.filters)' + message: filters is required when type is Filters, and forbidden otherwise + azure: + description: Azure configures failure domain information for the Azure platform. + type: array + items: + description: AzureFailureDomain configures failure domain information for the Azure platform. + type: object + required: + - zone + properties: + zone: + description: Availability Zone for the virtual machine. If nil, the virtual machine should be deployed to no zone. + type: string + gcp: + description: GCP configures failure domain information for the GCP platform. + type: array + items: + description: GCPFailureDomain configures failure domain information for the GCP platform + type: object + required: + - zone + properties: + zone: + description: Zone is the zone in which the GCP machine provider will create the VM. + type: string + openstack: + description: OpenStack configures failure domain information for the OpenStack platform. + type: array + items: + description: OpenStackFailureDomain configures failure domain information for the OpenStack platform. + type: object + minProperties: 1 + properties: + availabilityZone: + description: 'availabilityZone is the nova availability zone in which the OpenStack machine provider will create the VM. If not specified, the VM will be created in the default availability zone specified in the nova configuration. Availability zone names must NOT contain : since it is used by admin users to specify hosts where instances are launched in server creation. Also, it must not contain spaces, otherwise nodes that belong to this availability zone will fail to register; see kubernetes/cloud-provider-openstack#1379 for further information. The maximum length of availability zone name is 63 as per labels limits.' + type: string + maxLength: 63 + minLength: 1 + pattern: '^[^: ]*$' + rootVolume: + description: rootVolume contains settings that will be used by the OpenStack machine provider to create the root volume attached to the VM. If not specified, no root volume will be created. + type: object + required: + - volumeType + properties: + availabilityZone: + description: availabilityZone specifies the Cinder availability zone where the root volume will be created. If not specified, the root volume will be created in the availability zone specified by the volume type in the cinder configuration. If the volume type (configured in the OpenStack cluster) does not specify an availability zone, the root volume will be created in the default availability zone specified in the cinder configuration. See https://docs.openstack.org/cinder/latest/admin/availability-zone-type.html for more details. If the OpenStack cluster is deployed with the cross_az_attach configuration option set to false, the root volume will have to be in the same availability zone as the VM (defined by OpenStackFailureDomain.AvailabilityZone). 
Availability zone names must NOT contain spaces, otherwise volumes that belong to this availability zone will fail to register; see kubernetes/cloud-provider-openstack#1379 for further information. The maximum length of availability zone name is 63 as per labels limits. + type: string + maxLength: 63 + minLength: 1 + pattern: ^[^ ]*$ + volumeType: + description: volumeType specifies the type of the root volume that will be provisioned. The maximum length of a volume type name is 255 characters, as per the OpenStack limit. + type: string + maxLength: 255 + minLength: 1 + x-kubernetes-validations: + - rule: '!has(self.availabilityZone) || !has(self.rootVolume) || has(self.rootVolume.availabilityZone)' + message: rootVolume.availabilityZone is required when availabilityZone is set + platform: + description: Platform identifies the platform which the FailureDomain represents. Currently supported values are AWS, Azure, GCP, and OpenStack. + type: string + enum: + - "" + - AWS + - Azure + - BareMetal + - GCP + - Libvirt + - OpenStack + - None + - VSphere + - oVirt + - IBMCloud + - KubeVirt + - EquinixMetal + - PowerVS + - AlibabaCloud + - Nutanix + - External + x-kubernetes-validations: + - rule: 'has(self.platform) && self.platform == ''AWS'' ? has(self.aws) : !has(self.aws)' + message: aws configuration is required when platform is AWS, and forbidden otherwise + - rule: 'has(self.platform) && self.platform == ''Azure'' ? has(self.azure) : !has(self.azure)' + message: azure configuration is required when platform is Azure, and forbidden otherwise + - rule: 'has(self.platform) && self.platform == ''GCP'' ? has(self.gcp) : !has(self.gcp)' + message: gcp configuration is required when platform is GCP, and forbidden otherwise + - rule: 'has(self.platform) && self.platform == ''OpenStack'' ? has(self.openstack) : !has(self.openstack)' + message: openstack configuration is required when platform is OpenStack, and forbidden otherwise + metadata: + description: 'ObjectMeta is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata Labels are required to match the ControlPlaneMachineSet selector.' + type: object + required: + - labels + properties: + annotations: + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + additionalProperties: + type: string + labels: + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels. This field must contain both the ''machine.openshift.io/cluster-api-machine-role'' and ''machine.openshift.io/cluster-api-machine-type'' labels, both with a value of ''master''. It must also contain a label with the key ''machine.openshift.io/cluster-api-cluster''.' 
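# Editor's note: a sketch of template metadata labels satisfying the three CEL
# rules listed just below. The cluster ID value "cluster-abc123" is
# hypothetical; the same labels must also appear under
# spec.selector.matchLabels so that the selector matches the template.
#
#   metadata:
#     labels:
#       machine.openshift.io/cluster-api-machine-role: master
#       machine.openshift.io/cluster-api-machine-type: master
#       machine.openshift.io/cluster-api-cluster: cluster-abc123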
+ type: object + additionalProperties: + type: string + x-kubernetes-validations: + - rule: '''machine.openshift.io/cluster-api-machine-role'' in self && self[''machine.openshift.io/cluster-api-machine-role''] == ''master''' + message: label 'machine.openshift.io/cluster-api-machine-role' is required, and must have value 'master' + - rule: '''machine.openshift.io/cluster-api-machine-type'' in self && self[''machine.openshift.io/cluster-api-machine-type''] == ''master''' + message: label 'machine.openshift.io/cluster-api-machine-type' is required, and must have value 'master' + - rule: '''machine.openshift.io/cluster-api-cluster'' in self' + message: label 'machine.openshift.io/cluster-api-cluster' is required + spec: + description: Spec contains the desired configuration of the Control Plane Machines. The ProviderSpec within contains platform specific details for creating the Control Plane Machines. The ProviderSpec should be complete apart from the platform specific failure domain field. This will be overridden when the Machines are created based on the FailureDomains field. + type: object + properties: + lifecycleHooks: + description: LifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle. + type: object + properties: + preDrain: + description: PreDrain hooks prevent the machine from being drained. This also blocks further lifecycle events, such as termination. + type: array + items: + description: LifecycleHook represents a single instance of a lifecycle hook + type: object + required: + - name + - owner + properties: + name: + description: Name defines a unique name for the lifecycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, e.g. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. + type: string + maxLength: 256 + minLength: 3 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + owner: + description: Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. + type: string + maxLength: 512 + minLength: 3 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + preTerminate: + description: PreTerminate hooks prevent the machine from being terminated. PreTerminate hooks will be actioned after the Machine has been drained. + type: array + items: + description: LifecycleHook represents a single instance of a lifecycle hook + type: object + required: + - name + - owner + properties: + name: + description: Name defines a unique name for the lifecycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, e.g. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. + type: string + maxLength: 256 + minLength: 3 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + owner: + description: Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. 
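# Editor's note: a sketch of the lifecycleHooks block described above. The hook
# names and the backup-controller owner are hypothetical; a preDrain hook blocks
# draining (and therefore termination), while a preTerminate hook is actioned
# only after the Machine has been drained.
#
#   lifecycleHooks:
#     preDrain:
#     - name: EtcdQuorumCheck
#       owner: clusteroperator/etcd
#     preTerminate:
#     - name: BackupSnapshot
#       owner: backup-controller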
+ type: string + maxLength: 512 + minLength: 3 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + metadata: + description: ObjectMeta will autopopulate the Node created. Use this to indicate what labels, annotations, name prefix, etc., should be used when creating the Node. + type: object + properties: + annotations: + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + additionalProperties: + type: string + generateName: + description: "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. \n If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). \n Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + type: string + labels: + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + additionalProperties: + type: string + name: + description: 'Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. + type: array + items: + description: OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. + type: object + required: + - apiVersion + - kind + - name + - uid + properties: + apiVersion: + description: API version of the referent. 
+ type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids' + type: string + providerID: + description: ProviderID is the identification ID of the machine provided by the provider. This field must match the provider ID as seen on the node object corresponding to this machine. This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver and then a comparison is done to find unregistered machines, which are then marked for deletion. This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider. + type: string + providerSpec: + description: ProviderSpec details Provider-specific configuration to use during node creation. + type: object + properties: + value: + description: Value is an inlined, serialized representation of the resource configuration. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field, akin to component config. + type: object + x-kubernetes-preserve-unknown-fields: true + taints: + description: The list of the taints to be applied to the corresponding Node in additive manner. This list will not overwrite any other taints added to the Node on an ongoing basis by other entities. These taints should be actively reconciled (e.g. if you ask the machine controller to apply a taint and then manually remove the taint, the machine controller will put it back), but the machine controller will not remove any taints. + type: array + items: + description: The node this Taint is attached to has the "effect" on any pod that does not tolerate the Taint. + type: object + required: + - effect + - key + properties: + effect: + description: Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Required. The taint key to be applied to a node. + type: string + timeAdded: + description: TimeAdded represents the time at which the taint was added. 
It is only written for NoExecute taints. + type: string + format: date-time + value: + description: The taint value corresponding to the taint key. + type: string + x-kubernetes-validations: + - rule: 'has(self.machineType) && self.machineType == ''machines_v1beta1_machine_openshift_io'' ? has(self.machines_v1beta1_machine_openshift_io) : !has(self.machines_v1beta1_machine_openshift_io)' + message: machines_v1beta1_machine_openshift_io configuration is required when machineType is machines_v1beta1_machine_openshift_io, and forbidden otherwise + status: + description: ControlPlaneMachineSetStatus represents the status of the ControlPlaneMachineSet CRD. + type: object + properties: + conditions: + description: 'Conditions represents the observations of the ControlPlaneMachineSet''s current state. Known .status.conditions.type are: Available, Degraded and Progressing.' + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. 
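# Editor's note: a sketch of one status condition shaped by the schema above;
# "Available" is one of the documented known condition types, and the reason,
# message, and timestamp values are illustrative only (status is written by
# the controller, not by users).
#
#   status:
#     conditions:
#     - type: Available
#       status: "True"
#       reason: AllReplicasAvailable
#       message: ""
#       lastTransitionTime: "2024-06-04T16:43:49Z"
#       observedGeneration: 1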
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + observedGeneration: + description: ObservedGeneration is the most recent generation observed for this ControlPlaneMachineSet. It corresponds to the ControlPlaneMachineSet's generation, which is updated on mutation by the API Server. + type: integer + format: int64 + readyReplicas: + description: ReadyReplicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller which are ready. Note that this value may be higher than the desired number of replicas while rolling updates are in-progress. + type: integer + format: int32 + replicas: + description: Replicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller. Note that during update operations this value may differ from the desired replica count. + type: integer + format: int32 + unavailableReplicas: + description: UnavailableReplicas is the number of Control Plane Machines that are still required before the ControlPlaneMachineSet reaches the desired available capacity. When this value is non-zero, the number of ReadyReplicas is less than the desired Replicas. + type: integer + format: int32 + updatedReplicas: + description: UpdatedReplicas is the number of non-terminated Control Plane Machines created by the ControlPlaneMachineSet controller that have the desired provider spec and are ready. This value is set to 0 when a change is detected to the desired spec. When the update strategy is RollingUpdate, this will also coincide with starting the process of updating the Machines. When the update strategy is OnDelete, this value will remain at 0 until a user deletes an existing replica and its replacement has become ready. + type: integer + format: int32 + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.labelSelector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/api/machine/v1/Makefile b/vendor/github.com/openshift/api/machine/v1/Makefile new file mode 100644 index 000000000..767014ac1 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="machine.openshift.io/v1" diff --git a/vendor/github.com/openshift/api/machine/v1/common.go b/vendor/github.com/openshift/api/machine/v1/common.go new file mode 100644 index 000000000..941d22b1c --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1/common.go @@ -0,0 +1,13 @@ +package v1 + +// InstanceTenancy indicates if instance should run on shared or single-tenant hardware. +type InstanceTenancy string + +const ( + // DefaultTenancy instance runs on shared hardware + DefaultTenancy InstanceTenancy = "default" + // DedicatedTenancy instance runs on single-tenant hardware + DedicatedTenancy InstanceTenancy = "dedicated" + // HostTenancy instance runs on a Dedicated Host, which is an isolated server with configurations that you can control. 
+ HostTenancy InstanceTenancy = "host" +) diff --git a/vendor/github.com/openshift/api/machine/v1/doc.go b/vendor/github.com/openshift/api/machine/v1/doc.go new file mode 100644 index 000000000..7bd97c950 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1/doc.go @@ -0,0 +1,7 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +kubebuilder:validation:Optional +// +groupName=machine.openshift.io +package v1 diff --git a/vendor/github.com/openshift/api/machine/v1/register.go b/vendor/github.com/openshift/api/machine/v1/register.go new file mode 100644 index 000000000..b950169bf --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1/register.go @@ -0,0 +1,40 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "machine.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + metav1.AddToGroupVersion(scheme, GroupVersion) + + scheme.AddKnownTypes(GroupVersion, + &ControlPlaneMachineSet{}, + &ControlPlaneMachineSetList{}, + ) + + return nil +} diff --git a/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.aws.testsuite.yaml b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.aws.testsuite.yaml new file mode 100644 index 000000000..07a5ec7c1 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.aws.testsuite.yaml @@ -0,0 +1,368 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] ControlPlaneMachineSet (AWS)" +crd: 0000_10_controlplanemachineset.crd.yaml +tests: + onCreate: + - name: Should reject an AWS platform failure domain without any AWS config + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: AWS + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains: Invalid value: \"object\": aws configuration is required when platform is AWS" + - name: Should reject an AWS configured failure domain without a platform type + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + 
machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + aws: + - placement: + availabilityZone: foo + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains.platform: Required value" + - name: Should reject an AWS configured failure domain with the wrong platform type + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: BareMetal + aws: + - placement: + availabilityZone: foo + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains: Invalid value: \"object\": aws configuration is required when platform is AWS, and forbidden otherwise" + - name: Should reject an AWS failure domain with the subnet type omitted + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: AWS + aws: + - subnet: {} + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains.aws[0].subnet.type: Required value, : Invalid value: \"null\"" + - name: Should reject an AWS failure domain with the subnet type ID and no ID provided + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: AWS + aws: + - subnet: + type: ID + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains.aws[0].subnet: Invalid value: \"object\": id is required when type is ID, and forbidden otherwise" + - name: Should accept an AWS failure domain with the subnet type ID and an ID provided + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + 
template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: AWS + aws: + - subnet: + type: ID + id: foo + expected: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + replicas: 3 + state: Inactive + strategy: + type: RollingUpdate + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: AWS + aws: + - subnet: + type: ID + id: foo + - name: Should reject an AWS failure domain with the subnet type ID and an ARN provided + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: AWS + aws: + - subnet: + type: ID + id: foo + arn: foo + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains.aws[0].subnet: Invalid value: \"object\": arn is required when type is ARN, and forbidden otherwise" + - name: Should reject an AWS failure domain with the subnet type ID and a Filter provided + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: AWS + aws: + - subnet: + type: ID + id: foo + filters: + - name: foo + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains.aws[0].subnet: Invalid value: \"object\": filters is required when type is Filters, and forbidden otherwise" + - name: Should accept an AWS failure domain with the subnet type ARN and an ARN provided + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + 
providerSpec: {} + failureDomains: + platform: AWS + aws: + - subnet: + type: ARN + arn: foo + expected: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + replicas: 3 + state: Inactive + strategy: + type: RollingUpdate + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: AWS + aws: + - subnet: + type: ARN + arn: foo + - name: Should accept an AWS failure domain with the subnet type Filters and a Filter provided + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: AWS + aws: + - subnet: + type: Filters + filters: + - name: foo + expected: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + replicas: 3 + state: Inactive + strategy: + type: RollingUpdate + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: AWS + aws: + - subnet: + type: Filters + filters: + - name: foo + - name: Should reject an AWS failure domain with the subnet type ARN and an ID provided + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: AWS + aws: + - subnet: + type: ARN + id: foo + arn: foo + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains.aws[0].subnet: Invalid value: \"object\": id is required when type is ID, and forbidden otherwise" diff --git a/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.azure.testsuite.yaml b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.azure.testsuite.yaml new file mode 100644 index 000000000..191bf65f2 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.azure.testsuite.yaml @@ -0,0 +1,74 @@ +apiVersion: 
apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] ControlPlaneMachineSet" +crd: 0000_10_controlplanemachineset.crd.yaml +tests: + onCreate: + - name: Should reject an Azure platform failure domain without any Azure config + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: Azure + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains: Invalid value: \"object\": azure configuration is required when platform is Azure" + - name: Should reject an Azure configured failure domain without a platform type + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + azure: + - zone: foo + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains.platform: Required value" + - name: Should reject an Azure configured failure domain with the wrong platform type + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: BareMetal + azure: + - zone: foo + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains: Invalid value: \"object\": azure configuration is required when platform is Azure, and forbidden otherwise" diff --git a/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.gcp.testsuite.yaml b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.gcp.testsuite.yaml new file mode 100644 index 000000000..518625f91 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.gcp.testsuite.yaml @@ -0,0 +1,74 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] ControlPlaneMachineSet" +crd: 0000_10_controlplanemachineset.crd.yaml +tests: + onCreate: + - name: Should reject a GCP platform failure domain without any GCP config + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + 
machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: GCP + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains: Invalid value: \"object\": gcp configuration is required when platform is GCP" + - name: Should reject a GCP configured failure domain without a platform type + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + gcp: + - zone: foo + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains.platform: Required value" + - name: Should reject a GCP configured failure domain with the wrong platform type + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: BareMetal + gcp: + - zone: foo + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains: Invalid value: \"object\": gcp configuration is required when platform is GCP, and forbidden otherwise" diff --git a/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.openstack.testsuite.yaml b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.openstack.testsuite.yaml new file mode 100644 index 000000000..a09de51e0 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.openstack.testsuite.yaml @@ -0,0 +1,632 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] ControlPlaneMachineSet" +crd: 0000_10_controlplanemachineset.crd.yaml +tests: + onCreate: + - name: Should reject an OpenStack platform failure domain without any OpenStack config + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: OpenStack + expectedError: 
"spec.template.machines_v1beta1_machine_openshift_io.failureDomains: Invalid value: \"object\": openstack configuration is required when platform is OpenStack" + - name: Should reject an OpenStack configured failure domain without a platform type + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + openstack: + - availabilityZone: foo + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains.platform: Required value" + - name: Should reject an OpenStack configured failure domain with an empty OpenStack config + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: OpenStack + openstack: {} + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains.openstack in body must be of type array: \"object\"" + - name: Should accept no failureDomains + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: "" + expected: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + replicas: 3 + state: Inactive + strategy: + type: RollingUpdate + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: "" + - name: Should reject an OpenStack configured failure domain with the wrong platform type + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + 
machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: BareMetal + openstack: + - availabilityZone: foo + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains: Invalid value: \"object\": openstack configuration is required when platform is OpenStack, and forbidden otherwise" + - name: Should accept an OpenStack failure domain with only the availabilityZone provided + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: OpenStack + openstack: + - availabilityZone: foo + expected: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + replicas: 3 + state: Inactive + strategy: + type: RollingUpdate + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: OpenStack + openstack: + - availabilityZone: foo + - name: Should accept an OpenStack failure domain with only the rootVolume provided + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: OpenStack + openstack: + - rootVolume: + availabilityZone: foo + volumeType: fast + expected: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + replicas: 3 + state: Inactive + strategy: + type: RollingUpdate + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: OpenStack + openstack: + - rootVolume: + availabilityZone: foo + volumeType: fast + - name: Should accept an OpenStack failure domain with only the root volume type provided + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + 
matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: OpenStack + openstack: + - rootVolume: + volumeType: typeone + expected: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + replicas: 3 + state: Inactive + strategy: + type: RollingUpdate + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: OpenStack + openstack: + - rootVolume: + volumeType: typeone + - name: Should accept an OpenStack failure domain with both availabilityZone and rootVolume provided + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: OpenStack + openstack: + - availabilityZone: foo + rootVolume: + availabilityZone: foo + volumeType: fast + expected: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + replicas: 3 + state: Inactive + strategy: + type: RollingUpdate + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: OpenStack + openstack: + - availabilityZone: foo + rootVolume: + availabilityZone: foo + volumeType: fast + - name: Should accept an OpenStack failure domain with both availabilityZone and root volume type provided + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: OpenStack + openstack: + - availabilityZone: foo + rootVolume: + availabilityZone: foo + 
volumeType: bar + expected: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + replicas: 3 + state: Inactive + strategy: + type: RollingUpdate + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: OpenStack + openstack: + - availabilityZone: foo + rootVolume: + availabilityZone: foo + volumeType: bar + - name: Should reject an OpenStack failure domain with no rootVolume volumeType provided + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: OpenStack + openstack: + - rootVolume: + availabilityZone: foo + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains.openstack[0].rootVolume.volumeType: Required value, : Invalid value: \"null\": some validation rules were not checked" + - name: Should reject an OpenStack failure domain with an empty rootVolume volumeType provided + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: OpenStack + openstack: + - rootVolume: + volumeType: "" + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains.openstack[0].rootVolume.volumeType: Invalid value: \"\": spec.template.machines_v1beta1_machine_openshift_io.failureDomains.openstack[0].rootVolume.volumeType in body should be at least 1 chars long" + - name: Should reject an OpenStack failure domain with too long a rootVolume volumeType name + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: OpenStack + openstack: + - availabilityZone: foo + rootVolume: + volumeType: 
a123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345 + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains.openstack[0].rootVolume.volumeType: Too long: may not be longer than 255" + - name: Should reject an OpenStack failure domain with both availabilityZone and root volume provided but with missing root volume availabilityZone + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: OpenStack + openstack: + - availabilityZone: foo + rootVolume: + volumeType: bar + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains.openstack[0]: Invalid value: \"object\": rootVolume.availabilityZone is required when availabilityZone is set" + - name: Should reject an empty OpenStack failure domain + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: OpenStack + openstack: + - availabilityZone: foo + - {} + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains.openstack[1] in body should have at least 1 properties" + - name: Should reject an OpenStack failure domain with an empty availabilityZone provided + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: OpenStack + openstack: + - availabilityZone: "" + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains.openstack[0].availabilityZone in body should be at least 1 chars long" + - name: Should reject an OpenStack failure domain with an empty rootVolume availabilityZone provided + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + 
machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: OpenStack + openstack: + - rootVolume: + availabilityZone: "" + volumeType: fast + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains.openstack[0].rootVolume.availabilityZone in body should be at least 1 chars long" + - name: Should reject an OpenStack failure domain with an invalid availabilityZone provided + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: OpenStack + openstack: + - availabilityZone: foo:bar + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains.openstack[0].availabilityZone in body should match" + - name: Should reject an OpenStack failure domain with an invalid availabilityZone provided for rootVolume + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + failureDomains: + platform: OpenStack + openstack: + - rootVolume: + availabilityZone: "foo bar" + volumeType: fast + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.failureDomains.openstack[0].rootVolume.availabilityZone in body should match" diff --git a/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.testsuite.yaml b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.testsuite.yaml new file mode 100644 index 000000000..3e65b31f6 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1/stable.controlplanemachineset.testsuite.yaml @@ -0,0 +1,488 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] ControlPlaneMachineSet" +crd: 0000_10_controlplanemachineset.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal ControlPlaneMachineSet + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + expected: | + apiVersion: machine.openshift.io/v1 
+ kind: ControlPlaneMachineSet + spec: + replicas: 3 + state: Inactive + strategy: + type: RollingUpdate + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + - name: Should reject a missing machineType + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + expectedError: "spec.template.machineType: Required value" + - name: Should reject a missing machines_v1beta1_machine_openshift_io + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + expectedError: "spec.template: Invalid value: \"object\": machines_v1beta1_machine_openshift_io configuration is required when machineType is machines_v1beta1_machine_openshift_io, and forbidden otherwise" + - name: Should reject a worker role label + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: worker + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.metadata.labels: Invalid value: \"object\": label 'machine.openshift.io/cluster-api-machine-role' is required, and must have value 'master'" + - name: Should reject a missing role label + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.metadata.labels: Invalid value: \"object\": label 'machine.openshift.io/cluster-api-machine-role' is required, and must have value 'master'" + - name: Should reject a worker type label + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + 
machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: worker + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.metadata.labels: Invalid value: \"object\": label 'machine.openshift.io/cluster-api-machine-type' is required, and must have value 'master'" + - name: Should reject a missing type label + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.metadata.labels: Invalid value: \"object\": label 'machine.openshift.io/cluster-api-machine-type' is required, and must have value 'master'" + - name: Should reject a missing cluster ID label + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + spec: + providerSpec: {} + expectedError: "spec.template.machines_v1beta1_machine_openshift_io.metadata.labels: Invalid value: \"object\": label 'machine.openshift.io/cluster-api-cluster' is required" + - name: Should be able to create an Active ControlPlaneMachineSet + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + state: Active + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + expected: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + replicas: 3 + state: Active + strategy: + type: RollingUpdate + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + onUpdate: + - name: Replicas should be immutable + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + replicas: 3 + state: Inactive + strategy: + type: RollingUpdate + selector: + matchLabels: + 
machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + updated: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + replicas: 5 + state: Inactive + strategy: + type: RollingUpdate + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + expectedError: "spec.replicas: Invalid value: \"integer\": replicas is immutable" + - name: Selector should be immutable + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + replicas: 3 + state: Inactive + strategy: + type: RollingUpdate + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + updated: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + replicas: 3 + state: Inactive + strategy: + type: RollingUpdate + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + foo: bar + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + expectedError: "spec.selector: Invalid value: \"object\": selector is immutable" + - name: Should default the strategy when removed + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + replicas: 3 + state: Inactive + strategy: + type: OnDelete + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + updated: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + replicas: 3 + state: Inactive + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + 
machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + expected: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + replicas: 3 + state: Inactive + strategy: + type: RollingUpdate + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + - name: Should allow the state to change to Active + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + replicas: 3 + state: Inactive + strategy: + type: RollingUpdate + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + updated: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + replicas: 3 + state: Active + strategy: + type: RollingUpdate + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + expected: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + replicas: 3 + state: Active + strategy: + type: RollingUpdate + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + - name: Should not allow the state to change from Active + initial: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + replicas: 3 + state: Active + strategy: + type: RollingUpdate + selector: + matchLabels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + template: + machineType: machines_v1beta1_machine_openshift_io + machines_v1beta1_machine_openshift_io: + metadata: + labels: + machine.openshift.io/cluster-api-machine-role: master + machine.openshift.io/cluster-api-machine-type: master + machine.openshift.io/cluster-api-cluster: cluster + spec: + providerSpec: {} + updated: | + apiVersion: machine.openshift.io/v1 + kind: ControlPlaneMachineSet + spec: + replicas: 3 + state: Inactive + strategy: + type: RollingUpdate + selector: + matchLabels: + 
+            machine.openshift.io/cluster-api-machine-role: master
+            machine.openshift.io/cluster-api-machine-type: master
+        template:
+          machineType: machines_v1beta1_machine_openshift_io
+          machines_v1beta1_machine_openshift_io:
+            metadata:
+              labels:
+                machine.openshift.io/cluster-api-machine-role: master
+                machine.openshift.io/cluster-api-machine-type: master
+                machine.openshift.io/cluster-api-cluster: cluster
+            spec:
+              providerSpec: {}
+    expectedError: "spec.state: Invalid value: \"string\": state cannot be changed once Active"
diff --git a/vendor/github.com/openshift/api/machine/v1/types_alibabaprovider.go b/vendor/github.com/openshift/api/machine/v1/types_alibabaprovider.go
new file mode 100644
index 000000000..4b5c8d6ef
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1/types_alibabaprovider.go
@@ -0,0 +1,374 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// AlibabaDiskPerformanceLevel enum attribute to describe a disk's performance level
+type AlibabaDiskPerformanceLevel string
+
+// AlibabaDiskCategory enum attribute to describe a disk's category
+type AlibabaDiskCategory string
+
+// AlibabaDiskEncryptionMode enum attribute to describe whether to enable or disable disk encryption
+type AlibabaDiskEncryptionMode string
+
+// AlibabaDiskPreservationPolicy enum attribute to describe whether to preserve or delete a disk upon instance removal
+type AlibabaDiskPreservationPolicy string
+
+// AlibabaResourceReferenceType enum attribute to identify the type of resource reference
+type AlibabaResourceReferenceType string
+
+const (
+	// DeleteWithInstance enum property to delete disk with instance deletion
+	DeleteWithInstance AlibabaDiskPreservationPolicy = "DeleteWithInstance"
+	// PreserveDisk enum property to determine disk preservation with instance deletion
+	PreserveDisk AlibabaDiskPreservationPolicy = "PreserveDisk"
+
+	// AlibabaDiskEncryptionEnabled enum property to enable disk encryption
+	AlibabaDiskEncryptionEnabled AlibabaDiskEncryptionMode = "encrypted"
+	// AlibabaDiskEncryptionDisabled enum property to disable disk encryption
+	AlibabaDiskEncryptionDisabled AlibabaDiskEncryptionMode = "disabled"
+
+	// AlibabaDiskPerformanceLevel0 enum property to set the level at PL0
+	PL0 AlibabaDiskPerformanceLevel = "PL0"
+	// AlibabaDiskPerformanceLevel1 enum property to set the level at PL1
+	PL1 AlibabaDiskPerformanceLevel = "PL1"
+	// AlibabaDiskPerformanceLevel2 enum property to set the level at PL2
+	PL2 AlibabaDiskPerformanceLevel = "PL2"
+	// AlibabaDiskPerformanceLevel3 enum property to set the level at PL3
+	PL3 AlibabaDiskPerformanceLevel = "PL3"
+
+	// AlibabaDiskCategoryUltraDisk enum property to set the category of disk to ultra disk
+	AlibabaDiskCatagoryUltraDisk AlibabaDiskCategory = "cloud_efficiency"
+	// AlibabaDiskCategorySSD enum property to set the category of disk to standard SSD
+	AlibabaDiskCatagorySSD AlibabaDiskCategory = "cloud_ssd"
"cloud_ssd" + // AlibabaDiskCategoryESSD enum proprty to set the category of disk to ESSD + AlibabaDiskCatagoryESSD AlibabaDiskCategory = "cloud_essd" + // AlibabaDiskCategoryBasic enum proprty to set the category of disk to basic + AlibabaDiskCatagoryBasic AlibabaDiskCategory = "cloud" + + // AlibabaResourceReferenceTypeID enum property to identify an ID type resource reference + AlibabaResourceReferenceTypeID AlibabaResourceReferenceType = "ID" + // AlibabaResourceReferenceTypeName enum property to identify an Name type resource reference + AlibabaResourceReferenceTypeName AlibabaResourceReferenceType = "Name" + // AlibabaResourceReferenceTypeTags enum property to identify a tags type resource reference + AlibabaResourceReferenceTypeTags AlibabaResourceReferenceType = "Tags" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AlibabaCloudMachineProviderConfig is the Schema for the alibabacloudmachineproviderconfig API +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +// +k8s:openapi-gen=true +type AlibabaCloudMachineProviderConfig struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // More detail about alibabacloud ECS + // https://www.alibabacloud.com/help/doc-detail/25499.htm?spm=a2c63.l28256.b99.727.496d7453jF7Moz + + //The instance type of the instance. + InstanceType string `json:"instanceType"` + + // The ID of the vpc + VpcID string `json:"vpcId"` + + // The ID of the region in which to create the instance. You can call the DescribeRegions operation to query the most recent region list. + RegionID string `json:"regionId"` + + // The ID of the zone in which to create the instance. You can call the DescribeZones operation to query the most recent region list. + ZoneID string `json:"zoneId"` + + // The ID of the image used to create the instance. + ImageID string `json:"imageId"` + + // DataDisks holds information regarding the extra disks attached to the instance + // +optional + DataDisks []DataDiskProperties `json:"dataDisk,omitempty"` + + // SecurityGroups is a list of security group references to assign to the instance. + // A reference holds either the security group ID, the resource name, or the required tags to search. + // When more than one security group is returned for a tag search, all the groups are associated with the instance up to the + // maximum number of security groups to which an instance can belong. + // For more information, see the "Security group limits" section in Limits. + // https://www.alibabacloud.com/help/en/doc-detail/25412.htm + SecurityGroups []AlibabaResourceReference `json:"securityGroups,omitempty"` + + // Bandwidth describes the internet bandwidth strategy for the instance + // +optional + Bandwidth BandwidthProperties `json:"bandwidth,omitempty"` + + // SystemDisk holds the properties regarding the system disk for the instance + // +optional + SystemDisk SystemDiskProperties `json:"systemDisk,omitempty"` + + // VSwitch is a reference to the vswitch to use for this instance. + // A reference holds either the vSwitch ID, the resource name, or the required tags to search. + // When more than one vSwitch is returned for a tag search, only the first vSwitch returned will be used. 
+	// This parameter is required when you create an instance of the VPC type.
+	// You can call the DescribeVSwitches operation to query the created vSwitches.
+	VSwitch AlibabaResourceReference `json:"vSwitch"`
+
+	// RAMRoleName is the name of the instance Resource Access Management (RAM) role. This allows the instance to perform API calls as this specified RAM role.
+	// +optional
+	RAMRoleName string `json:"ramRoleName,omitempty"`
+
+	// ResourceGroup references the resource group to which to assign the instance.
+	// A reference holds either the resource group ID, the resource name, or the required tags to search.
+	// When more than one resource group is returned for a search, an error will be produced and the Machine will not be created.
+	// Resource Groups do not support searching by tags.
+	ResourceGroup AlibabaResourceReference `json:"resourceGroup"`
+
+	// Tenancy specifies whether to create the instance on a dedicated host.
+	// Valid values:
+	//
+	// default: creates the instance on a non-dedicated host.
+	// host: creates the instance on a dedicated host. If you do not specify the DedicatedHostID parameter, Alibaba Cloud automatically selects a dedicated host for the instance.
+	// Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+	// Currently the default is `default`.
+	// +optional
+	Tenancy InstanceTenancy `json:"tenancy,omitempty"`
+
+	// UserDataSecret contains a local reference to a secret that contains the
+	// UserData to apply to the instance
+	// +optional
+	UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret,omitempty"`
+
+	// CredentialsSecret is a reference to the secret with alibabacloud credentials. Otherwise, defaults to permissions
+	// provided by attached RAM role where the actuator is running.
+	// +optional
+	CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret,omitempty"`
+
+	// Tags are the set of metadata to add to an instance.
+	// +optional
+	Tags []Tag `json:"tag,omitempty"`
+}
+
+// AlibabaResourceReference is a reference to a specific AlibabaCloud resource by ID, name, or tags.
+// Only one of ID, Name or Tags may be specified. Specifying more than one will result in
+// a validation error.
+type AlibabaResourceReference struct {
+	// type identifies the resource reference type for this entry.
+	Type AlibabaResourceReferenceType `json:"type"`
+
+	// ID of resource
+	// +optional
+	ID *string `json:"id,omitempty"`
+
+	// Name of the resource
+	// +optional
+	Name *string `json:"name,omitempty"`
+
+	// Tags is a set of metadata based upon ECS object tags used to identify a resource.
+	// For details about usage when multiple resources are found, please see the owning parent field documentation.
+	// +optional
+	Tags *[]Tag `json:"tags,omitempty"`
+}
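
As an aside, the one-of semantics documented on this type can be illustrated with a short sketch of a providerSpec's securityGroups list (values are hypothetical; exactly one of id, name, or tags may be set per entry, matching that entry's type):

    securityGroups:
    - type: ID
      id: sg-0123456789abcdef    # hypothetical security group ID
    - type: Name
      name: my-security-group    # resolved by resource name; a Tags entry
                                 # may instead match several groups at once

+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// AlibabaCloudMachineProviderConfigList contains a list of AlibabaCloudMachineProviderConfig
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type AlibabaCloudMachineProviderConfigList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.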
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata,omitempty"`
+
+	Items []AlibabaCloudMachineProviderConfig `json:"items"`
+}
+
+// AlibabaCloudMachineProviderStatus is the Schema for the alibabacloudmachineproviderconfig API
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type AlibabaCloudMachineProviderStatus struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// InstanceID is the instance ID of the machine created in alibabacloud
+	// +optional
+	InstanceID *string `json:"instanceId,omitempty"`
+
+	// InstanceState is the state of the alibabacloud instance for this machine
+	// +optional
+	InstanceState *string `json:"instanceState,omitempty"`
+
+	// Conditions is a set of conditions associated with the Machine to indicate
+	// errors or other status
+	// +optional
+	Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+// SystemDiskProperties contains the information regarding the system disk including performance, size, name, and category
+type SystemDiskProperties struct {
+	// Category is the category of the system disk.
+	// Valid values:
+	// cloud_essd: ESSD. When the parameter is set to this value, you can use the SystemDisk.PerformanceLevel parameter to specify the performance level of the disk.
+	// cloud_efficiency: ultra disk.
+	// cloud_ssd: standard SSD.
+	// cloud: basic disk.
+	// Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+	// Currently for non-I/O optimized instances of retired instance types, the default is `cloud`.
+	// Currently for other instances, the default is `cloud_efficiency`.
+	// +kubebuilder:validation:Enum="cloud_efficiency"; "cloud_ssd"; "cloud_essd"; "cloud"
+	// +optional
+	Category string `json:"category,omitempty"`
+
+	// PerformanceLevel is the performance level of the ESSD used as the system disk.
+	// Valid values:
+	//
+	// PL0: A single ESSD can deliver up to 10,000 random read/write IOPS.
+	// PL1: A single ESSD can deliver up to 50,000 random read/write IOPS.
+	// PL2: A single ESSD can deliver up to 100,000 random read/write IOPS.
+	// PL3: A single ESSD can deliver up to 1,000,000 random read/write IOPS.
+	// Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+	// Currently the default is `PL1`.
+	// For more information about ESSD performance levels, see ESSDs.
+	// +kubebuilder:validation:Enum="PL0"; "PL1"; "PL2"; "PL3"
+	// +optional
+	PerformanceLevel string `json:"performanceLevel,omitempty"`
+
+	// Name is the name of the system disk. If the name is specified, the name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-).
+	// Empty value means the platform chooses a default, which is subject to change over time.
+	// Currently the default is `""`.
+	// +kubebuilder:validation:MaxLength=128
+	// +optional
+	Name string `json:"name,omitempty"`
+
+	// Size is the size of the system disk. Unit: GiB. Valid values: 20 to 500.
+	// The value must be at least 20 and greater than or equal to the size of the image.
+	// Empty value means the platform chooses a default, which is subject to change over time.
+	// Currently the default is `40` or the size of the image depending on whichever is greater.
+	// +optional
+	Size int64 `json:"size,omitempty"`
+}
+
+// DataDiskProperties contains the information regarding the datadisk attached to an instance
+type DataDiskProperties struct {
+	// Name is the name of data disk N. If the name is specified, the name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-).
+	//
+	// Empty value means the platform chooses a default, which is subject to change over time.
+	// Currently the default is `""`.
+	// +optional
+	Name string `name:"diskName,omitempty"`
+
+	// SnapshotID is the ID of the snapshot used to create data disk N. Valid values of N: 1 to 16.
+	//
+	// When the DataDisk.N.SnapshotID parameter is specified, the DataDisk.N.Size parameter is ignored. The data disk is created based on the size of the specified snapshot.
+	// Use snapshots created after July 15, 2013. Otherwise, an error is returned and your request is rejected.
+	//
+	// +optional
+	SnapshotID string `name:"snapshotId,omitempty"`
+
+	// Size of the data disk N. Valid values of N: 1 to 16. Unit: GiB. Valid values:
+	//
+	// Valid values when DataDisk.N.Category is set to cloud_efficiency: 20 to 32768
+	// Valid values when DataDisk.N.Category is set to cloud_ssd: 20 to 32768
+	// Valid values when DataDisk.N.Category is set to cloud_essd: 20 to 32768
+	// Valid values when DataDisk.N.Category is set to cloud: 5 to 2000
+	// The value of this parameter must be greater than or equal to the size of the snapshot specified by the SnapshotID parameter.
+	// +optional
+	Size int64 `name:"size,omitempty"`
+
+	// DiskEncryption specifies whether to encrypt data disk N.
+	//
+	// Empty value means the platform chooses a default, which is subject to change over time.
+	// Currently the default is `disabled`.
+	// +kubebuilder:validation:Enum="encrypted";"disabled"
+	// +optional
+	DiskEncryption AlibabaDiskEncryptionMode `name:"diskEncryption,omitempty"`
+
+	// PerformanceLevel is the performance level of the ESSD used as data disk N. The N value must be the same as that in DataDisk.N.Category when DataDisk.N.Category is set to cloud_essd.
+	// Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+	// Currently the default is `PL1`.
+	// Valid values:
+	//
+	// PL0: A single ESSD can deliver up to 10,000 random read/write IOPS.
+	// PL1: A single ESSD can deliver up to 50,000 random read/write IOPS.
+	// PL2: A single ESSD can deliver up to 100,000 random read/write IOPS.
+	// PL3: A single ESSD can deliver up to 1,000,000 random read/write IOPS.
+	// For more information about ESSD performance levels, see ESSDs.
+	// +kubebuilder:validation:Enum="PL0"; "PL1"; "PL2"; "PL3"
+	// +optional
+	PerformanceLevel AlibabaDiskPerformanceLevel `name:"performanceLevel,omitempty"`
+
+	// Category describes the type of data disk N.
+	// Valid values:
+	// cloud_efficiency: ultra disk
+	// cloud_ssd: standard SSD
+	// cloud_essd: ESSD
+	// cloud: basic disk
+	// Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+	// Currently for non-I/O optimized instances of retired instance types, the default is `cloud`.
+	// Currently for other instances, the default is `cloud_efficiency`.
+	// +kubebuilder:validation:Enum="cloud_efficiency"; "cloud_ssd"; "cloud_essd"; "cloud"
+	// +optional
+	Category AlibabaDiskCategory `name:"category,omitempty"`
+
+	// KMSKeyID is the ID of the Key Management Service (KMS) key to be used by data disk N.
+	// Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+	// Currently the default is `""` which is interpreted as do not use KMSKey encryption.
+	// +optional
+	KMSKeyID string `name:"kmsKeyId,omitempty"`
+
+	// DiskPreservation specifies whether to release data disk N along with the instance.
+	// Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+	// Currently the default is `DeleteWithInstance`
+	// +kubebuilder:validation:Enum="DeleteWithInstance";"PreserveDisk"
+	// +optional
+	DiskPreservation AlibabaDiskPreservationPolicy `name:"diskPreservation,omitempty"`
+}
+
+// Tag describes the tags of an ECS Instance
+type Tag struct {
+	// Key is the name of the key pair
+	Key string `name:"Key"`
+	// Value is the value or data of the key pair
+	Value string `name:"value"`
+}
+
+// BandwidthProperties describes the bandwidth strategy for the network of the instance
+type BandwidthProperties struct {
+	// InternetMaxBandwidthIn is the maximum inbound public bandwidth. Unit: Mbit/s. Valid values:
+	// When the purchased outbound public bandwidth is less than or equal to 10 Mbit/s, the valid values of this parameter are 1 to 10.
+	// Currently the default is `10` when outbound bandwidth is less than or equal to 10 Mbit/s.
+	// When the purchased outbound public bandwidth is greater than 10, the valid values are 1 to the InternetMaxBandwidthOut value.
+	// Currently the default is the value used for `InternetMaxBandwidthOut` when outbound public bandwidth is greater than 10.
+	// +optional
+	InternetMaxBandwidthIn int64 `json:"internetMaxBandwidthIn,omitempty"`
+
+	// InternetMaxBandwidthOut is the maximum outbound public bandwidth. Unit: Mbit/s. Valid values: 0 to 100.
+	// When a value greater than 0 is used then a public IP address is assigned to the instance.
+	// Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+	// Currently the default is `0`
+	// +optional
+	InternetMaxBandwidthOut int64 `json:"internetMaxBandwidthOut,omitempty"`
+}
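
Putting the disk and bandwidth properties together, a hedged sketch of how these fields might appear in an AlibabaCloud providerSpec (all values hypothetical; the inner data-disk keys follow the struct tags above, and the exact wire casing may differ):

    systemDisk:
      category: cloud_essd          # cloud_essd enables performanceLevel
      performanceLevel: PL1
      size: 120
    dataDisk:
    - diskName: data-0
      size: 100
      diskEncryption: encrypted
      diskPreservation: DeleteWithInstance
    bandwidth:
      internetMaxBandwidthOut: 10   # a value above 0 assigns a public IP

diff --git a/vendor/github.com/openshift/api/machine/v1/types_aws.go b/vendor/github.com/openshift/api/machine/v1/types_aws.go
new file mode 100644
index 000000000..a41237c3b
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1/types_aws.go
@@ -0,0 +1,49 @@
+package v1
+
+// AWSResourceReference is a reference to a specific AWS resource by ID, ARN, or filters.
+// Only one of ID, ARN or Filters may be specified. Specifying more than one will result in
+// a validation error.
+// +union
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'ID' ? has(self.id) : !has(self.id)",message="id is required when type is ID, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'ARN' ? has(self.arn) : !has(self.arn)",message="arn is required when type is ARN, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Filters' ? has(self.filters) : !has(self.filters)",message="filters is required when type is Filters, and forbidden otherwise"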
+type AWSResourceReference struct {
+	// Type determines how the reference will fetch the AWS resource.
+	// +unionDiscriminator
+	// +kubebuilder:validation:Required
+	Type AWSResourceReferenceType `json:"type"`
+	// ID of resource.
+	// +optional
+	ID *string `json:"id,omitempty"`
+	// ARN of resource.
+	// +optional
+	ARN *string `json:"arn,omitempty"`
+	// Filters is a set of filters used to identify a resource.
+	// +optional
+	Filters *[]AWSResourceFilter `json:"filters,omitempty"`
+}
+
+// AWSResourceReferenceType is an enumeration of different resource reference types.
+// +kubebuilder:validation:Enum:="ID";"ARN";"Filters"
+type AWSResourceReferenceType string
+
+const (
+	// AWSIDReferenceType is a resource reference based on the object ID.
+	AWSIDReferenceType AWSResourceReferenceType = "ID"
+
+	// AWSARNReferenceType is a resource reference based on the object ARN.
+	AWSARNReferenceType AWSResourceReferenceType = "ARN"
+
+	// AWSFiltersReferenceType is a resource reference based on filters.
+	AWSFiltersReferenceType AWSResourceReferenceType = "Filters"
+)
+
+// AWSResourceFilter is a filter used to identify an AWS resource
+type AWSResourceFilter struct {
+	// Name of the filter. Filter names are case-sensitive.
+	// +kubebuilder:validation:Required
+	Name string `json:"name"`
+	// Values includes one or more filter values. Filter values are case-sensitive.
+	// +optional
+	Values []string `json:"values,omitempty"`
+}
diff --git a/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go b/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go
new file mode 100644
index 000000000..9f81f4d10
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1/types_controlplanemachineset.go
@@ -0,0 +1,420 @@
+package v1
+
+import (
+	configv1 "github.com/openshift/api/config/v1"
+	machinev1beta1 "github.com/openshift/api/machine/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControlPlaneMachineSet ensures that a specified number of control plane machine replicas are running at any given time.
+// +k8s:openapi-gen=true
+// +kubebuilder:resource:scope=Namespaced
+// +kubebuilder:subresource:status
+// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas
+// +kubebuilder:printcolumn:name="Desired",type="integer",JSONPath=".spec.replicas",description="Desired Replicas"
+// +kubebuilder:printcolumn:name="Current",type="integer",JSONPath=".status.replicas",description="Current Replicas"
+// +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.readyReplicas",description="Ready Replicas"
+// +kubebuilder:printcolumn:name="Updated",type="integer",JSONPath=".status.updatedReplicas",description="Updated Replicas"
+// +kubebuilder:printcolumn:name="Unavailable",type="integer",JSONPath=".status.unavailableReplicas",description="Observed number of unavailable replicas"
+// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".spec.state",description="ControlPlaneMachineSet state"
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="ControlPlaneMachineSet age"
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ControlPlaneMachineSet struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec ControlPlaneMachineSetSpec `json:"spec,omitempty"`
+	Status ControlPlaneMachineSetStatus `json:"status,omitempty"`
+}
+
+// ControlPlaneMachineSetSpec represents the configuration of the ControlPlaneMachineSet.
+type ControlPlaneMachineSetSpec struct {
+	// State defines whether the ControlPlaneMachineSet is Active or Inactive.
+	// When Inactive, the ControlPlaneMachineSet will not take any action on the
+	// state of the Machines within the cluster.
+	// When Active, the ControlPlaneMachineSet will reconcile the Machines and
+	// will update the Machines as necessary.
+	// Once Active, a ControlPlaneMachineSet cannot be made Inactive. To prevent
+	// further action, please remove the ControlPlaneMachineSet.
+	// +kubebuilder:default:="Inactive"
+	// +default="Inactive"
+	// +kubebuilder:validation:XValidation:rule="oldSelf != 'Active' || self == oldSelf",message="state cannot be changed once Active"
+	// +optional
+	State ControlPlaneMachineSetState `json:"state,omitempty"`
+
+	// Replicas defines how many Control Plane Machines should be
+	// created by this ControlPlaneMachineSet.
+	// This field is immutable and cannot be changed after cluster
+	// installation.
+	// The ControlPlaneMachineSet only operates with 3 or 5 node control planes;
+	// 3 and 5 are the only valid values for this field.
+	// +kubebuilder:validation:Enum:=3;5
+	// +kubebuilder:default:=3
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="replicas is immutable"
+	// +kubebuilder:validation:Required
+	Replicas *int32 `json:"replicas"`
+
+	// Strategy defines how the ControlPlaneMachineSet will update
+	// Machines when it detects a change to the ProviderSpec.
+	// +kubebuilder:default:={type: RollingUpdate}
+	// +optional
+	Strategy ControlPlaneMachineSetStrategy `json:"strategy,omitempty"`
+
+	// Label selector for Machines. Existing Machines selected by this
+	// selector will be the ones affected by this ControlPlaneMachineSet.
+	// It must match the template's labels.
+	// This field is considered immutable after creation of the resource.
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="selector is immutable"
+	// +kubebuilder:validation:Required
+	Selector metav1.LabelSelector `json:"selector"`
+
+	// Template describes the Control Plane Machines that will be created
+	// by this ControlPlaneMachineSet.
+	// +kubebuilder:validation:Required
+	Template ControlPlaneMachineSetTemplate `json:"template"`
+}
+
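Because the CEL rule on state only permits the Inactive-to-Active transition, activation is typically a one-field merge patch; a minimal sketch (illustrative only, assuming the conventional singleton named `cluster` in the `openshift-machine-api` namespace):

    # Merge patch body to activate a ControlPlaneMachineSet; the reverse
    # change would be rejected by the "state cannot be changed once Active"
    # validation rule shown above.
    spec:
      state: Active

+// ControlPlaneMachineSetState is an enumeration of the possible states of the
+// ControlPlaneMachineSet resource. It allows it to be either Active or Inactive.
+// +kubebuilder:validation:Enum:="Active";"Inactive"
+type ControlPlaneMachineSetState string
+
+const (
+	// ControlPlaneMachineSetStateActive is the value used to denote the ControlPlaneMachineSet
+	// should be active and should perform updates as required.
+	ControlPlaneMachineSetStateActive ControlPlaneMachineSetState = "Active"
+
+	// ControlPlaneMachineSetStateInactive is the value used to denote the ControlPlaneMachineSet
+	// should not be active and should not perform any updates.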
+	ControlPlaneMachineSetStateInactive ControlPlaneMachineSetState = "Inactive"
+)
+
+// ControlPlaneMachineSetTemplate is a template used by the ControlPlaneMachineSet
+// to create the Machines that it will manage in the future.
+// +union
+// + ---
+// + This struct is a discriminated union which allows users to select the type of Machine
+// + that the ControlPlaneMachineSet should create and manage.
+// + For now, the only supported type is the OpenShift Machine API Machine, but in the future
+// + we plan to expand this to allow other Machine types such as Cluster API Machines or a
+// + future version of the Machine API Machine.
+// +kubebuilder:validation:XValidation:rule="has(self.machineType) && self.machineType == 'machines_v1beta1_machine_openshift_io' ? has(self.machines_v1beta1_machine_openshift_io) : !has(self.machines_v1beta1_machine_openshift_io)",message="machines_v1beta1_machine_openshift_io configuration is required when machineType is machines_v1beta1_machine_openshift_io, and forbidden otherwise"
+type ControlPlaneMachineSetTemplate struct {
+	// MachineType determines the type of Machines that should be managed by the ControlPlaneMachineSet.
+	// Currently, the only valid value is machines_v1beta1_machine_openshift_io.
+	// +unionDiscriminator
+	// +kubebuilder:validation:Required
+	MachineType ControlPlaneMachineSetMachineType `json:"machineType,omitempty"`
+
+	// OpenShiftMachineV1Beta1Machine defines the template for creating Machines
+	// from the v1beta1.machine.openshift.io API group.
+	// +optional
+	OpenShiftMachineV1Beta1Machine *OpenShiftMachineV1Beta1MachineTemplate `json:"machines_v1beta1_machine_openshift_io,omitempty"`
+}
+
+// ControlPlaneMachineSetMachineType is an enumeration of valid Machine types
+// supported by the ControlPlaneMachineSet.
+// +kubebuilder:validation:Enum:=machines_v1beta1_machine_openshift_io
+type ControlPlaneMachineSetMachineType string
+
+const (
+	// OpenShiftMachineV1Beta1MachineType is the OpenShift Machine API v1beta1 Machine type.
+	OpenShiftMachineV1Beta1MachineType ControlPlaneMachineSetMachineType = "machines_v1beta1_machine_openshift_io"
+)
+
+// OpenShiftMachineV1Beta1MachineTemplate is a template for the ControlPlaneMachineSet to create
+// Machines from the v1beta1.machine.openshift.io API group.
+type OpenShiftMachineV1Beta1MachineTemplate struct {
+	// FailureDomains is the list of failure domains (sometimes called
+	// availability zones) in which the ControlPlaneMachineSet should balance
+	// the Control Plane Machines.
+	// This will be merged into the ProviderSpec given in the template.
+	// This field is optional on platforms that do not require placement information.
+	// +optional
+	FailureDomains FailureDomains `json:"failureDomains,omitempty"`
+
+	// ObjectMeta is the standard object metadata
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// Labels are required to match the ControlPlaneMachineSet selector.
+	// +kubebuilder:validation:Required
+	ObjectMeta ControlPlaneMachineSetTemplateObjectMeta `json:"metadata"`
+
+	// Spec contains the desired configuration of the Control Plane Machines.
+	// The ProviderSpec within contains platform specific details
+	// for creating the Control Plane Machines.
+	// The ProviderSpec should be complete apart from the platform specific
+	// failure domain field. This will be overridden when the Machines
+	// are created based on the FailureDomains field.
+ // +kubebuilder:validation:Required + Spec machinev1beta1.MachineSpec `json:"spec"` +} + +// ControlPlaneMachineSetTemplateObjectMeta is a subset of the metav1.ObjectMeta struct. +// It allows users to specify labels and annotations that will be copied onto Machines +// created from this template. +type ControlPlaneMachineSetTemplateObjectMeta struct { + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. May match selectors of replication controllers + // and services. + // More info: http://kubernetes.io/docs/user-guide/labels. + // This field must contain both the 'machine.openshift.io/cluster-api-machine-role' and 'machine.openshift.io/cluster-api-machine-type' labels, both with a value of 'master'. + // It must also contain a label with the key 'machine.openshift.io/cluster-api-cluster'. + // +kubebuilder:validation:XValidation:rule="'machine.openshift.io/cluster-api-machine-role' in self && self['machine.openshift.io/cluster-api-machine-role'] == 'master'",message="label 'machine.openshift.io/cluster-api-machine-role' is required, and must have value 'master'" + // +kubebuilder:validation:XValidation:rule="'machine.openshift.io/cluster-api-machine-type' in self && self['machine.openshift.io/cluster-api-machine-type'] == 'master'",message="label 'machine.openshift.io/cluster-api-machine-type' is required, and must have value 'master'" + // +kubebuilder:validation:XValidation:rule="'machine.openshift.io/cluster-api-cluster' in self",message="label 'machine.openshift.io/cluster-api-cluster' is required" + // +kubebuilder:validation:Required + Labels map[string]string `json:"labels"` + + // Annotations is an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. They are not + // queryable and should be preserved when modifying objects. + // More info: http://kubernetes.io/docs/user-guide/annotations + // +optional + Annotations map[string]string `json:"annotations,omitempty"` +} + +// ControlPlaneMachineSetStrategy defines the strategy for applying updates to the +// Control Plane Machines managed by the ControlPlaneMachineSet. +type ControlPlaneMachineSetStrategy struct { + // Type defines the type of update strategy that should be + // used when updating Machines owned by the ControlPlaneMachineSet. + // Valid values are "RollingUpdate" and "OnDelete". + // The current default value is "RollingUpdate". + // +kubebuilder:default:="RollingUpdate" + // +default="RollingUpdate" + // +kubebuilder:validation:Enum:="RollingUpdate";"OnDelete" + // +optional + Type ControlPlaneMachineSetStrategyType `json:"type,omitempty"` + + // This is left as a struct to allow future rolling update + // strategy configuration to be added later. +} + +// ControlPlaneMachineSetStrategyType is an enumeration of different update strategies +// for the Control Plane Machines. +type ControlPlaneMachineSetStrategyType string + +const ( + // RollingUpdate is the default update strategy type for a + // ControlPlaneMachineSet. This will cause the ControlPlaneMachineSet to + // first create a new Machine and wait for this to be Ready + // before removing the Machine chosen for replacement. + RollingUpdate ControlPlaneMachineSetStrategyType = "RollingUpdate" + + // Recreate causes the ControlPlaneMachineSet controller to first + // remove a ControlPlaneMachine before creating its + // replacement. 
This allows for scenarios with limited capacity
+	// such as baremetal environments where additional capacity to
+	// perform rolling updates is not available.
+	Recreate ControlPlaneMachineSetStrategyType = "Recreate"
+
+	// OnDelete causes the ControlPlaneMachineSet to only replace a
+	// Machine once it has been marked for deletion. This strategy
+	// makes the rollout of updated specifications into a manual
+	// process. This allows users to test new configuration on
+	// a single Machine without forcing the rollout of all of their
+	// Control Plane Machines.
+	OnDelete ControlPlaneMachineSetStrategyType = "OnDelete"
+)
+
+// FailureDomains represents the different configurations required to spread Machines
+// across failure domains on different platforms.
+// +union
+// +kubebuilder:validation:XValidation:rule="has(self.platform) && self.platform == 'AWS' ? has(self.aws) : !has(self.aws)",message="aws configuration is required when platform is AWS, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="has(self.platform) && self.platform == 'Azure' ? has(self.azure) : !has(self.azure)",message="azure configuration is required when platform is Azure, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="has(self.platform) && self.platform == 'GCP' ? has(self.gcp) : !has(self.gcp)",message="gcp configuration is required when platform is GCP, and forbidden otherwise"
+// +kubebuilder:validation:XValidation:rule="has(self.platform) && self.platform == 'OpenStack' ? has(self.openstack) : !has(self.openstack)",message="openstack configuration is required when platform is OpenStack, and forbidden otherwise"
+type FailureDomains struct {
+	// Platform identifies the platform that the FailureDomain represents.
+	// Currently supported values are AWS, Azure, GCP, and OpenStack.
+	// +unionDiscriminator
+	// +kubebuilder:validation:Required
+	Platform configv1.PlatformType `json:"platform"`
+
+	// AWS configures failure domain information for the AWS platform.
+	// +optional
+	AWS *[]AWSFailureDomain `json:"aws,omitempty"`
+
+	// Azure configures failure domain information for the Azure platform.
+	// +optional
+	Azure *[]AzureFailureDomain `json:"azure,omitempty"`
+
+	// GCP configures failure domain information for the GCP platform.
+	// +optional
+	GCP *[]GCPFailureDomain `json:"gcp,omitempty"`
+
+	// OpenStack configures failure domain information for the OpenStack platform.
+	// +optional
+	//
+	// + ---
+	// + Unlike other platforms, OpenStack failure domains can be empty.
+	// + Some OpenStack deployments may not have availability zones or root volumes.
+	// + Therefore we check the length of the list to determine if it's empty, rather
+	// + than checking for nil as we would if the field were a pointer.
+	// +optional
+	OpenStack []OpenStackFailureDomain `json:"openstack,omitempty"`
+}
+
+// AWSFailureDomain configures failure domain information for the AWS platform.
+// +kubebuilder:validation:MinProperties:=1
+type AWSFailureDomain struct {
+	// Subnet is a reference to the subnet to use for this instance.
+	// +optional
+	Subnet *AWSResourceReference `json:"subnet,omitempty"`
+
+	// Placement configures the placement information for this instance.
+	// +optional
+	Placement AWSFailureDomainPlacement `json:"placement,omitempty"`
+}
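Taken together, the union discriminator and the per-platform CEL rules mean a consumer populates exactly one platform list. A sketch of building an AWS failure domain spread (AWSFailureDomainPlacement is defined immediately below; the zone names are illustrative):

package example

import (
	configv1 "github.com/openshift/api/config/v1"
	machinev1 "github.com/openshift/api/machine/v1"
)

// buildAWSFailureDomains spreads the control plane across the given
// availability zones. Because platform is AWS, the CEL rules require the
// aws list to be populated and forbid the other platform lists.
func buildAWSFailureDomains(zones []string) machinev1.FailureDomains {
	domains := make([]machinev1.AWSFailureDomain, 0, len(zones))
	for _, zone := range zones {
		domains = append(domains, machinev1.AWSFailureDomain{
			Placement: machinev1.AWSFailureDomainPlacement{AvailabilityZone: zone},
		})
	}
	return machinev1.FailureDomains{
		Platform: configv1.AWSPlatformType, // the "AWS" discriminator value
		AWS:      &domains,
	}
}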
+
+// AWSFailureDomainPlacement configures the placement information for the AWSFailureDomain.
+type AWSFailureDomainPlacement struct {
+	// AvailabilityZone is the availability zone of the instance.
+	// +kubebuilder:validation:Required
+	AvailabilityZone string `json:"availabilityZone"`
+}
+
+// AzureFailureDomain configures failure domain information for the Azure platform.
+type AzureFailureDomain struct {
+	// Availability Zone for the virtual machine.
+	// If empty, the virtual machine should be deployed to no zone.
+	// +kubebuilder:validation:Required
+	Zone string `json:"zone"`
+}
+
+// GCPFailureDomain configures failure domain information for the GCP platform.
+type GCPFailureDomain struct {
+	// Zone is the zone in which the GCP machine provider will create the VM.
+	// +kubebuilder:validation:Required
+	Zone string `json:"zone"`
+}
+
+// OpenStackFailureDomain configures failure domain information for the OpenStack platform.
+// +kubebuilder:validation:MinProperties:=1
+// +kubebuilder:validation:XValidation:rule="!has(self.availabilityZone) || !has(self.rootVolume) || has(self.rootVolume.availabilityZone)",message="rootVolume.availabilityZone is required when availabilityZone is set"
+type OpenStackFailureDomain struct {
+	// availabilityZone is the nova availability zone in which the OpenStack machine provider will create the VM.
+	// If not specified, the VM will be created in the default availability zone specified in the nova configuration.
+	// Availability zone names must NOT contain ':' since it is used by admin users to specify hosts where instances
+	// are launched in server creation. They must not contain spaces either; otherwise nodes in this availability zone
+	// will fail to register, see kubernetes/cloud-provider-openstack#1379 for further information.
+	// The maximum length of an availability zone name is 63, as per label limits.
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:Pattern=`^[^: ]*$`
+	// +kubebuilder:validation:MaxLength=63
+	// +optional
+	AvailabilityZone string `json:"availabilityZone,omitempty"`
+
+	// rootVolume contains settings that will be used by the OpenStack machine provider to create the root volume attached to the VM.
+	// If not specified, no root volume will be created.
+	//
+	// + ---
+	// + RootVolume must be a pointer to allow us to require that at least one valid property is set within the failure domain.
+	// + If it were a plain struct then omitempty would not work and the minProperties validations would no longer be valid.
+	// +optional
+	RootVolume *RootVolume `json:"rootVolume,omitempty"`
+}
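A sketch of an OpenStack failure domain that satisfies the XValidation rule above: because availabilityZone is set, the root volume (the RootVolume type defined just below) must carry its own availability zone too. The zone and volume type names are placeholders:

package example

import machinev1 "github.com/openshift/api/machine/v1"

// openStackFailureDomain pins the VM to a nova zone and its root volume to a
// matching cinder zone, as required whenever availabilityZone is set.
var openStackFailureDomain = machinev1.OpenStackFailureDomain{
	AvailabilityZone: "az0",
	RootVolume: &machinev1.RootVolume{
		AvailabilityZone: "cinder-az0",
		VolumeType:       "fast-ssd", // a volume type is always required when rootVolume is set
	},
}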
+
+// RootVolume represents the volume metadata to boot from.
+// The original RootVolume struct is defined in v1alpha1, but it is not best practice to use it
+// directly here, so we define a new one that should stay in sync with the original.
+type RootVolume struct {
+	// availabilityZone specifies the Cinder availability zone where the root volume will be created.
+	// If not specified, the root volume will be created in the availability zone specified by the volume type in the cinder configuration.
+	// If the volume type (configured in the OpenStack cluster) does not specify an availability zone, the root volume will be created in the default availability
+	// zone specified in the cinder configuration. See https://docs.openstack.org/cinder/latest/admin/availability-zone-type.html for more details.
+	// If the OpenStack cluster is deployed with the cross_az_attach configuration option set to false, the root volume will have to be in the same
+	// availability zone as the VM (defined by OpenStackFailureDomain.AvailabilityZone).
+	// Availability zone names must NOT contain spaces; otherwise volumes in this availability zone will fail to register,
+	// see kubernetes/cloud-provider-openstack#1379 for further information.
+	// The maximum length of an availability zone name is 63, as per label limits.
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=63
+	// +kubebuilder:validation:Pattern=`^[^ ]*$`
+	// +optional
+	AvailabilityZone string `json:"availabilityZone,omitempty"`
+
+	// volumeType specifies the type of the root volume that will be provisioned.
+	// The maximum length of a volume type name is 255 characters, as per the OpenStack limit.
+	// + ---
+	// + Historically, the installer has always required a volume type to be specified when deploying
+	// + the control plane with a root volume. This is because the default volume type in Cinder is not guaranteed
+	// + to be available, therefore we prefer the user to be explicit about the volume type to use.
+	// + We apply the same logic in CPMS: if the failure domain specifies a root volume, we require the user to specify a volume type.
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=255
+	VolumeType string `json:"volumeType"`
+}
+
+// ControlPlaneMachineSetStatus represents the status of the ControlPlaneMachineSet CRD.
+type ControlPlaneMachineSetStatus struct {
+	// Conditions represents the observations of the ControlPlaneMachineSet's current state.
+	// Known .status.conditions.type are: Available, Degraded and Progressing.
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=type
+	// +optional
+	Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+
+	// ObservedGeneration is the most recent generation observed for this
+	// ControlPlaneMachineSet. It corresponds to the ControlPlaneMachineSet's generation,
+	// which is updated on mutation by the API Server.
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+
+	// Replicas is the number of Control Plane Machines created by the
+	// ControlPlaneMachineSet controller.
+	// Note that during update operations this value may differ from the
+	// desired replica count.
+	// +optional
+	Replicas int32 `json:"replicas,omitempty"`
+
+	// ReadyReplicas is the number of Control Plane Machines created by the
+	// ControlPlaneMachineSet controller which are ready.
+	// Note that this value may be higher than the desired number of replicas
+	// while rolling updates are in progress.
+	// +optional
+	ReadyReplicas int32 `json:"readyReplicas,omitempty"`
+
+	// UpdatedReplicas is the number of non-terminated Control Plane Machines
+	// created by the ControlPlaneMachineSet controller that have the desired
+	// provider spec and are ready.
+	// This value is set to 0 when a change is detected to the desired spec.
+	// When the update strategy is RollingUpdate, this will also coincide
+	// with starting the process of updating the Machines.
+	// When the update strategy is OnDelete, this value will remain at 0 until
+	// a user deletes an existing replica and its replacement has become ready.
+	// +optional
+	UpdatedReplicas int32 `json:"updatedReplicas,omitempty"`
+
+	// UnavailableReplicas is the number of Control Plane Machines that are
+	// still required before the ControlPlaneMachineSet reaches the desired
+	// available capacity.
When this value is non-zero, the number of
+	// ReadyReplicas is less than the desired Replicas.
+	// +optional
+	UnavailableReplicas int32 `json:"unavailableReplicas,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ControlPlaneMachineSetList contains a list of ControlPlaneMachineSets.
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ControlPlaneMachineSetList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata,omitempty"`
+
+	Items []ControlPlaneMachineSet `json:"items"`
+}
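Before the provider-specific types that follow, it may help to see how the pieces fit together. A sketch of a minimal ControlPlaneMachineSet that satisfies the CEL rules above: the template carries the three mandatory labels, the selector matches them, and the populated union member agrees with the machineType discriminator. The object name, namespace, and cluster ID label value are illustrative, and the provider spec is elided because it is platform specific:

package example

import (
	machinev1 "github.com/openshift/api/machine/v1"
	machinev1beta1 "github.com/openshift/api/machine/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

var (
	replicas = int32(3)

	// labels carries the three labels required by the CEL rules on
	// ControlPlaneMachineSetTemplateObjectMeta; the cluster ID value
	// is a placeholder.
	labels = map[string]string{
		"machine.openshift.io/cluster-api-machine-role": "master",
		"machine.openshift.io/cluster-api-machine-type": "master",
		"machine.openshift.io/cluster-api-cluster":      "example-cluster-id",
	}

	controlPlaneMachineSet = machinev1.ControlPlaneMachineSet{
		ObjectMeta: metav1.ObjectMeta{Name: "cluster", Namespace: "openshift-machine-api"},
		Spec: machinev1.ControlPlaneMachineSetSpec{
			State:    machinev1.ControlPlaneMachineSetStateInactive,
			Replicas: &replicas,
			Strategy: machinev1.ControlPlaneMachineSetStrategy{Type: machinev1.RollingUpdate},
			// The selector must match the template labels and is immutable.
			Selector: metav1.LabelSelector{MatchLabels: labels},
			Template: machinev1.ControlPlaneMachineSetTemplate{
				// The discriminator must agree with the populated union member.
				MachineType: machinev1.OpenShiftMachineV1Beta1MachineType,
				OpenShiftMachineV1Beta1Machine: &machinev1.OpenShiftMachineV1Beta1MachineTemplate{
					ObjectMeta: machinev1.ControlPlaneMachineSetTemplateObjectMeta{Labels: labels},
					// The provider spec is elided here; it is platform specific.
					Spec: machinev1beta1.MachineSpec{},
				},
			},
		},
	}
)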
diff --git a/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go b/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go
new file mode 100644
index 000000000..fc7db6be6
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1/types_nutanixprovider.go
@@ -0,0 +1,169 @@
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// NutanixMachineProviderConfig is the Schema for the nutanixmachineproviderconfigs API
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +k8s:openapi-gen=true
+type NutanixMachineProviderConfig struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// cluster identifies the cluster (the Prism Element under management
+	// of the Prism Central) in which the Machine's VM will be created.
+	// The cluster identifier (uuid or name) can be obtained from the Prism Central console
+	// or using the prism_central API.
+	// +kubebuilder:validation:Required
+	Cluster NutanixResourceIdentifier `json:"cluster"`
+
+	// image identifies the RHCOS image uploaded to the Prism Central (PC).
+	// The image identifier (uuid or name) can be obtained from the Prism Central console
+	// or using the prism_central API.
+	// +kubebuilder:validation:Required
+	Image NutanixResourceIdentifier `json:"image"`
+
+	// subnets holds a list of identifiers (one or more) of the cluster's network subnets
+	// for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be
+	// obtained from the Prism Central console or using the prism_central API.
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:MinItems=1
+	Subnets []NutanixResourceIdentifier `json:"subnets"`
+
+	// vcpusPerSocket is the number of vCPUs per socket of the VM
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:Minimum=1
+	VCPUsPerSocket int32 `json:"vcpusPerSocket"`
+
+	// vcpuSockets is the number of vCPU sockets of the VM
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:Minimum=1
+	VCPUSockets int32 `json:"vcpuSockets"`
+
+	// memorySize is the memory size (in Quantity format) of the VM
+	// The minimum memorySize is 2Gi bytes
+	// +kubebuilder:validation:Required
+	MemorySize resource.Quantity `json:"memorySize"`
+
+	// systemDiskSize is the size (in Quantity format) of the system disk of the VM
+	// The minimum systemDiskSize is 20Gi bytes
+	// +kubebuilder:validation:Required
+	SystemDiskSize resource.Quantity `json:"systemDiskSize"`
+
+	// bootType indicates the boot type (Legacy, UEFI or SecureBoot) the Machine's VM uses to boot.
+	// If this field is empty or omitted, the VM will use the default boot type "Legacy" to boot.
+	// "SecureBoot" depends on "UEFI" boot, i.e., enabling "SecureBoot" means that "UEFI" boot is also enabled.
+	// +kubebuilder:validation:Enum="";Legacy;UEFI;SecureBoot
+	// +optional
+	BootType NutanixBootType `json:"bootType"`
+
+	// project optionally identifies a Prism project for the Machine's VM to associate with.
+	// +optional
+	Project NutanixResourceIdentifier `json:"project"`
+
+	// categories optionally adds one or more prism categories (each with key and value) for
+	// the Machine's VM to associate with. All the category key and value pairs specified must
+	// already exist in the prism central.
+	// +listType=map
+	// +listMapKey=key
+	// +optional
+	Categories []NutanixCategory `json:"categories"`
+
+	// userDataSecret is a local reference to a secret that contains the
+	// UserData to apply to the VM
+	UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret,omitempty"`
+
+	// credentialsSecret is a local reference to a secret that contains the
+	// credentials data to access the Nutanix PC client
+	// +kubebuilder:validation:Required
+	CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret"`
+}
+
+// NutanixCategory identifies a pair of prism category key and value
+type NutanixCategory struct {
+	// key is the prism category key name
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=64
+	// +kubebuilder:validation:Required
+	Key string `json:"key"`
+
+	// value is the prism category value associated with the key
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=64
+	// +kubebuilder:validation:Required
+	Value string `json:"value"`
+}
+
+// NutanixBootType is an enumeration of different boot types for Nutanix VM.
+type NutanixBootType string
+
+const (
+	// NutanixLegacyBoot is the legacy BIOS boot type
+	NutanixLegacyBoot NutanixBootType = "Legacy"
+
+	// NutanixUEFIBoot is the UEFI boot type
+	NutanixUEFIBoot NutanixBootType = "UEFI"
+
+	// NutanixSecureBoot is the Secure boot type
+	NutanixSecureBoot NutanixBootType = "SecureBoot"
+)
+
+// NutanixIdentifierType is an enumeration of different resource identifier types.
+type NutanixIdentifierType string
+
+const (
+	// NutanixIdentifierUUID is a resource identifier identifying the object by UUID.
+	NutanixIdentifierUUID NutanixIdentifierType = "uuid"
+
+	// NutanixIdentifierName is a resource identifier identifying the object by Name.
+	NutanixIdentifierName NutanixIdentifierType = "name"
+)
+
+// NutanixResourceIdentifier holds the identity of a Nutanix PC resource (cluster, image, subnet, etc.)
+// +union
+type NutanixResourceIdentifier struct {
+	// Type is the identifier type to use for this resource.
+	// +unionDiscriminator
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:Enum:=uuid;name
+	Type NutanixIdentifierType `json:"type"`
+
+	// uuid is the UUID of the resource in the PC.
+	// +optional
+	UUID *string `json:"uuid,omitempty"`
+
+	// name is the resource name in the PC
+	// +optional
+	Name *string `json:"name,omitempty"`
+}
+
+// NutanixMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field.
+// It contains nutanix-specific status information.
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type NutanixMachineProviderStatus struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// conditions is a set of conditions associated with the Machine to indicate
+	// errors or other status
+	// +optional
+	Conditions []metav1.Condition `json:"conditions,omitempty"`
+
+	// vmUUID is the UUID of the VM associated with the Machine.
+	// The field is missing before the VM is created.
+	// Once the VM is created, the field is filled with the VM's UUID and it will not change.
+	// The vmUUID is used to find the VM when updating the Machine status,
+	// and to delete the VM when the Machine is deleted.
+	// +optional
+	VmUUID *string `json:"vmUUID,omitempty"`
+}
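A sketch of a minimal NutanixMachineProviderConfig built from the types above; the cluster is addressed by name and the image by UUID to show both arms of the NutanixResourceIdentifier union, and every identifier, size, and secret name is a placeholder:

package example

import (
	machinev1 "github.com/openshift/api/machine/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

var (
	clusterName = "pe-cluster-1"                         // placeholder Prism Element name
	imageUUID   = "6e80c1e8-0000-0000-0000-000000000000" // placeholder image UUID
	subnetName  = "vm-network"                           // placeholder subnet name

	nutanixProviderConfig = machinev1.NutanixMachineProviderConfig{
		Cluster: machinev1.NutanixResourceIdentifier{Type: machinev1.NutanixIdentifierName, Name: &clusterName},
		Image:   machinev1.NutanixResourceIdentifier{Type: machinev1.NutanixIdentifierUUID, UUID: &imageUUID},
		Subnets: []machinev1.NutanixResourceIdentifier{
			{Type: machinev1.NutanixIdentifierName, Name: &subnetName},
		},
		VCPUsPerSocket: 2,
		VCPUSockets:    4,
		MemorySize:     resource.MustParse("16Gi"),  // must be at least 2Gi
		SystemDiskSize: resource.MustParse("120Gi"), // must be at least 20Gi
		BootType:       machinev1.NutanixLegacyBoot,
		CredentialsSecret: &corev1.LocalObjectReference{Name: "nutanix-credentials"},
	}
)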
diff --git a/vendor/github.com/openshift/api/machine/v1/types_powervsprovider.go b/vendor/github.com/openshift/api/machine/v1/types_powervsprovider.go
new file mode 100644
index 000000000..c131139c5
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1/types_powervsprovider.go
@@ -0,0 +1,227 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// PowerVSResourceType is an enum attribute to identify the type of a resource reference.
+type PowerVSResourceType string
+
+// PowerVSProcessorType is an enum attribute to identify the PowerVS instance processor type.
+type PowerVSProcessorType string
+
+// IBMVPCLoadBalancerType is the type of LoadBalancer to use when registering
+// an instance with load balancers specified in LoadBalancerNames.
+type IBMVPCLoadBalancerType string
+
+// ApplicationLoadBalancerType is one of the possible values for IBMVPCLoadBalancerType.
+const (
+	ApplicationLoadBalancerType IBMVPCLoadBalancerType = "Application" // Application Load Balancer for VPC (ALB)
+)
+
+const (
+	// PowerVSResourceTypeID is the enum property identifying an ID type resource reference
+	PowerVSResourceTypeID PowerVSResourceType = "ID"
+	// PowerVSResourceTypeName is the enum property identifying a Name type resource reference
+	PowerVSResourceTypeName PowerVSResourceType = "Name"
+	// PowerVSResourceTypeRegEx is the enum property identifying a regular expression type resource reference
+	PowerVSResourceTypeRegEx PowerVSResourceType = "RegEx"
+	// PowerVSProcessorTypeDedicated is the enum property identifying a Dedicated Power VS processor type
+	PowerVSProcessorTypeDedicated PowerVSProcessorType = "Dedicated"
+	// PowerVSProcessorTypeShared is the enum property identifying a Shared Power VS processor type
+	PowerVSProcessorTypeShared PowerVSProcessorType = "Shared"
+	// PowerVSProcessorTypeCapped is the enum property identifying a Capped Power VS processor type
+	PowerVSProcessorTypeCapped PowerVSProcessorType = "Capped"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PowerVSMachineProviderConfig is the type that will be embedded in a Machine.Spec.ProviderSpec field
+// for a PowerVS virtual machine. It is used by the PowerVS machine actuator to create a single Machine.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +k8s:openapi-gen=true
+type PowerVSMachineProviderConfig struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// userDataSecret contains a local reference to a secret that contains the
+	// UserData to apply to the instance.
+	// +optional
+	UserDataSecret *PowerVSSecretReference `json:"userDataSecret,omitempty"`
+
+	// credentialsSecret is a reference to the secret with IBM Cloud credentials.
+	// +optional
+	CredentialsSecret *PowerVSSecretReference `json:"credentialsSecret,omitempty"`
+
+	// serviceInstance is the reference to the Power VS service on which the server instance (VM) will be created.
+	// A Power VS service is a container for all Power VS instances in a specific geographic region.
+	// serviceInstance can be created via the IBM Cloud catalog or CLI.
+	// The supported serviceInstance identifiers in PowerVSResource are Name and ID, which can be obtained from the IBM Cloud UI or the IBM Cloud CLI.
+	// More detail about the Power VS service instance is available at:
+	// https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server
+	// +kubebuilder:validation:=Required
+	ServiceInstance PowerVSResource `json:"serviceInstance"`
+
+	// image identifies the RHCOS image uploaded to the IBM COS bucket which is used to create the instance.
+	// The supported image identifiers in PowerVSResource are Name and ID, which can be obtained from the IBM Cloud UI or the IBM Cloud CLI.
+	// +kubebuilder:validation:=Required
+	Image PowerVSResource `json:"image"`
+
+	// network is the reference to the Network to use for this instance.
+	// The supported network identifiers in PowerVSResource are Name, ID and RegEx, which can be obtained from the IBM Cloud UI or the IBM Cloud CLI.
+	// +kubebuilder:validation:=Required
+	Network PowerVSResource `json:"network"`
+
+	// keyPairName is the name of the KeyPair to use for SSH.
+	// The key pair will be exposed to the instance via the instance metadata service.
+	// On boot, the OS will copy the public key into the authorized keys for the core user.
+	// +kubebuilder:validation:=Required
+	KeyPairName string `json:"keyPairName"`
+
+	// systemType is the System type used to host the instance.
+	// systemType determines the number of cores and memory that is available.
+	// A few of the supported SystemTypes are s922, e880, and e980.
+	// The e880 systemType is available only in Dallas datacenters.
+	// The e980 systemType is available in datacenters except Dallas and Washington.
+	// When omitted, this means that the user has no opinion and the platform is left to choose a
+	// reasonable default, which is subject to change over time. The current default is s922 which is generally available.
+	// + This is not an enum because we expect other values to be added later which should be supported implicitly.
+	// +optional
+	SystemType string `json:"systemType,omitempty"`
+
+	// processorType is the VM instance processor type.
+	// It must be set to one of the following values: Dedicated, Capped or Shared.
+	// Dedicated: resources are allocated for a specific client; the hypervisor makes a 1:1 binding of a partition's processor to a physical processor core.
+	// Shared: shared among other clients.
+	// Capped: shared, but resources do not expand beyond those that are requested; the amount of CPU time is capped to the value specified for the entitlement.
+	// If processorType is set to Dedicated, then the processors value cannot be fractional.
+	// When omitted, this means that the user has no opinion and the platform is left to choose a
+	// reasonable default, which is subject to change over time. The current default is Shared.
+	// +kubebuilder:validation:Enum:="Dedicated";"Shared";"Capped";""
+	// +optional
+	ProcessorType PowerVSProcessorType `json:"processorType,omitempty"`
+
+	// processors is the number of virtual processors in a virtual machine.
+	// When processorType is set to Dedicated, the processors value cannot be fractional.
+	// The maximum value for processors depends on the selected SystemType:
+	// when SystemType is set to e880 or e980, the maximum processors value is 143;
+	// when SystemType is set to s922, the maximum processors value is 15.
+	// The minimum value for processors depends on the selected ProcessorType:
+	// when ProcessorType is set to Shared or Capped, the minimum processors value is 0.5;
+	// when ProcessorType is set to Dedicated, the minimum processors value is 1.
+	// When omitted, this means that the user has no opinion and the platform is left to choose a
+	// reasonable default, which is subject to change over time. The default is set based on the selected ProcessorType:
+	// when ProcessorType is Dedicated, the default is 1;
+	// when ProcessorType is Shared or Capped, the default is 0.5.
+	// +optional
+	Processors intstr.IntOrString `json:"processors,omitempty"`
+
+	// memoryGiB is the size of a virtual machine's memory, in GiB.
+	// The maximum value for memoryGiB depends on the selected SystemType:
+	// when SystemType is set to e880, the maximum memoryGiB value is 7463 GiB;
+	// when SystemType is set to e980, the maximum memoryGiB value is 15307 GiB;
+	// when SystemType is set to s922, the maximum memoryGiB value is 942 GiB.
+	// The minimum memory is 32 GiB.
+	// When omitted, this means the user has no opinion and the platform is left to choose a reasonable
+	// default, which is subject to change over time. The current default is 32.
+	// +optional
+	MemoryGiB int32 `json:"memoryGiB,omitempty"`
+
+	// loadBalancers is the set of load balancers to which the new control plane instance
+	// should be added once it is created.
+	// +optional
+	LoadBalancers []LoadBalancerReference `json:"loadBalancers,omitempty"`
+}
+
+// PowerVSResource is a reference to a specific PowerVS resource by ID, Name or RegEx.
+// Only one of ID, Name or RegEx may be specified. Specifying more than one will result in
+// a validation error.
+// +union
+type PowerVSResource struct {
+	// Type identifies the resource type for this entry.
+	// Valid values are ID, Name and RegEx.
+	// +kubebuilder:validation:Enum:=ID;Name;RegEx
+	// +optional
+	Type PowerVSResourceType `json:"type,omitempty"`
+	// ID of resource
+	// +optional
+	ID *string `json:"id,omitempty"`
+	// Name of resource
+	// +optional
+	Name *string `json:"name,omitempty"`
+	// RegEx contains the pattern to match to find a resource
+	// +optional
+	RegEx *string `json:"regex,omitempty"`
+}
+
+// PowerVSMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field.
+// It contains PowerVS-specific status information.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type PowerVSMachineProviderStatus struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// conditions is a set of conditions associated with the Machine to indicate
+	// errors or other status
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=type
+	// +optional
+	Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"`
+
+	// instanceId is the instance ID of the machine created in PowerVS.
+	// instanceId uniquely identifies a Power VS server instance (VM) under a Power VS service.
+	// This helps in updating or deleting a VM in Power VS Cloud.
+	// +optional
+	InstanceID *string `json:"instanceId,omitempty"`
+
+	// serviceInstanceID is the reference to the Power VS ServiceInstance on which the machine instance will be created.
+	// serviceInstanceID uniquely identifies the Power VS service.
+	// Setting serviceInstanceID makes it easy and efficient to fetch a server instance (VM) within Power VS Cloud.
+	// +optional
+	ServiceInstanceID *string `json:"serviceInstanceID,omitempty"`
+
+	// instanceState is the state of the PowerVS instance for this machine.
+	// Possible instance states are Active, Build, ShutOff, Reboot.
+	// This is used to display additional information to the user regarding the instance's current state.
+	// +optional
+	InstanceState *string `json:"instanceState,omitempty"`
+}
+
+// PowerVSSecretReference contains enough information to locate the
+// referenced secret inside the same namespace.
+// +structType=atomic
+type PowerVSSecretReference struct {
+	// Name of the secret.
+	// +optional
+	Name string `json:"name,omitempty"`
+}
+
+// LoadBalancerReference is a reference to a load balancer on IBM Cloud virtual private cloud (VPC).
+type LoadBalancerReference struct {
+	// name of the LoadBalancer in IBM Cloud VPC.
+	// The name should be between 1 and 63 characters long and may consist of lowercase alphanumeric characters and hyphens only.
+	// The value must not end with a hyphen.
+	// It is a reference to an existing LoadBalancer created by the OpenShift installer component.
+ // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^([a-z]|[a-z][-a-z0-9]*[a-z0-9]|[0-9][-a-z0-9]*([a-z]|[-a-z][-a-z0-9]*[a-z0-9]))$` + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=63 + Name string `json:"name"` + // type of the LoadBalancer service supported by IBM Cloud VPC. + // Currently, only Application LoadBalancer is supported. + // More details about Application LoadBalancer + // https://cloud.ibm.com/docs/vpc?topic=vpc-load-balancers-about&interface=ui + // Supported values are Application. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum:="Application" + Type IBMVPCLoadBalancerType `json:"type"` +} diff --git a/vendor/github.com/openshift/api/machine/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machine/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..ca3184327 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1/zz_generated.deepcopy.go @@ -0,0 +1,942 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSFailureDomain) DeepCopyInto(out *AWSFailureDomain) { + *out = *in + if in.Subnet != nil { + in, out := &in.Subnet, &out.Subnet + *out = new(AWSResourceReference) + (*in).DeepCopyInto(*out) + } + out.Placement = in.Placement + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSFailureDomain. +func (in *AWSFailureDomain) DeepCopy() *AWSFailureDomain { + if in == nil { + return nil + } + out := new(AWSFailureDomain) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSFailureDomainPlacement) DeepCopyInto(out *AWSFailureDomainPlacement) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSFailureDomainPlacement. +func (in *AWSFailureDomainPlacement) DeepCopy() *AWSFailureDomainPlacement { + if in == nil { + return nil + } + out := new(AWSFailureDomainPlacement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSResourceFilter) DeepCopyInto(out *AWSResourceFilter) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSResourceFilter. +func (in *AWSResourceFilter) DeepCopy() *AWSResourceFilter { + if in == nil { + return nil + } + out := new(AWSResourceFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AWSResourceReference) DeepCopyInto(out *AWSResourceReference) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ARN != nil { + in, out := &in.ARN, &out.ARN + *out = new(string) + **out = **in + } + if in.Filters != nil { + in, out := &in.Filters, &out.Filters + *out = new([]AWSResourceFilter) + if **in != nil { + in, out := *in, *out + *out = make([]AWSResourceFilter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSResourceReference. +func (in *AWSResourceReference) DeepCopy() *AWSResourceReference { + if in == nil { + return nil + } + out := new(AWSResourceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlibabaCloudMachineProviderConfig) DeepCopyInto(out *AlibabaCloudMachineProviderConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.DataDisks != nil { + in, out := &in.DataDisks, &out.DataDisks + *out = make([]DataDiskProperties, len(*in)) + copy(*out, *in) + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]AlibabaResourceReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.Bandwidth = in.Bandwidth + out.SystemDisk = in.SystemDisk + in.VSwitch.DeepCopyInto(&out.VSwitch) + in.ResourceGroup.DeepCopyInto(&out.ResourceGroup) + if in.UserDataSecret != nil { + in, out := &in.UserDataSecret, &out.UserDataSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.CredentialsSecret != nil { + in, out := &in.CredentialsSecret, &out.CredentialsSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]Tag, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudMachineProviderConfig. +func (in *AlibabaCloudMachineProviderConfig) DeepCopy() *AlibabaCloudMachineProviderConfig { + if in == nil { + return nil + } + out := new(AlibabaCloudMachineProviderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AlibabaCloudMachineProviderConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlibabaCloudMachineProviderConfigList) DeepCopyInto(out *AlibabaCloudMachineProviderConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AlibabaCloudMachineProviderConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudMachineProviderConfigList. 
+func (in *AlibabaCloudMachineProviderConfigList) DeepCopy() *AlibabaCloudMachineProviderConfigList { + if in == nil { + return nil + } + out := new(AlibabaCloudMachineProviderConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AlibabaCloudMachineProviderConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlibabaCloudMachineProviderStatus) DeepCopyInto(out *AlibabaCloudMachineProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.InstanceState != nil { + in, out := &in.InstanceState, &out.InstanceState + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaCloudMachineProviderStatus. +func (in *AlibabaCloudMachineProviderStatus) DeepCopy() *AlibabaCloudMachineProviderStatus { + if in == nil { + return nil + } + out := new(AlibabaCloudMachineProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AlibabaCloudMachineProviderStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlibabaResourceReference) DeepCopyInto(out *AlibabaResourceReference) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = new([]Tag) + if **in != nil { + in, out := *in, *out + *out = make([]Tag, len(*in)) + copy(*out, *in) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlibabaResourceReference. +func (in *AlibabaResourceReference) DeepCopy() *AlibabaResourceReference { + if in == nil { + return nil + } + out := new(AlibabaResourceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureFailureDomain) DeepCopyInto(out *AzureFailureDomain) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFailureDomain. +func (in *AzureFailureDomain) DeepCopy() *AzureFailureDomain { + if in == nil { + return nil + } + out := new(AzureFailureDomain) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BandwidthProperties) DeepCopyInto(out *BandwidthProperties) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BandwidthProperties. 
+func (in *BandwidthProperties) DeepCopy() *BandwidthProperties { + if in == nil { + return nil + } + out := new(BandwidthProperties) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneMachineSet) DeepCopyInto(out *ControlPlaneMachineSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneMachineSet. +func (in *ControlPlaneMachineSet) DeepCopy() *ControlPlaneMachineSet { + if in == nil { + return nil + } + out := new(ControlPlaneMachineSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControlPlaneMachineSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneMachineSetList) DeepCopyInto(out *ControlPlaneMachineSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ControlPlaneMachineSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneMachineSetList. +func (in *ControlPlaneMachineSetList) DeepCopy() *ControlPlaneMachineSetList { + if in == nil { + return nil + } + out := new(ControlPlaneMachineSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControlPlaneMachineSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneMachineSetSpec) DeepCopyInto(out *ControlPlaneMachineSetSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + out.Strategy = in.Strategy + in.Selector.DeepCopyInto(&out.Selector) + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneMachineSetSpec. +func (in *ControlPlaneMachineSetSpec) DeepCopy() *ControlPlaneMachineSetSpec { + if in == nil { + return nil + } + out := new(ControlPlaneMachineSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneMachineSetStatus) DeepCopyInto(out *ControlPlaneMachineSetStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneMachineSetStatus. 
+func (in *ControlPlaneMachineSetStatus) DeepCopy() *ControlPlaneMachineSetStatus { + if in == nil { + return nil + } + out := new(ControlPlaneMachineSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneMachineSetStrategy) DeepCopyInto(out *ControlPlaneMachineSetStrategy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneMachineSetStrategy. +func (in *ControlPlaneMachineSetStrategy) DeepCopy() *ControlPlaneMachineSetStrategy { + if in == nil { + return nil + } + out := new(ControlPlaneMachineSetStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneMachineSetTemplate) DeepCopyInto(out *ControlPlaneMachineSetTemplate) { + *out = *in + if in.OpenShiftMachineV1Beta1Machine != nil { + in, out := &in.OpenShiftMachineV1Beta1Machine, &out.OpenShiftMachineV1Beta1Machine + *out = new(OpenShiftMachineV1Beta1MachineTemplate) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneMachineSetTemplate. +func (in *ControlPlaneMachineSetTemplate) DeepCopy() *ControlPlaneMachineSetTemplate { + if in == nil { + return nil + } + out := new(ControlPlaneMachineSetTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneMachineSetTemplateObjectMeta) DeepCopyInto(out *ControlPlaneMachineSetTemplateObjectMeta) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneMachineSetTemplateObjectMeta. +func (in *ControlPlaneMachineSetTemplateObjectMeta) DeepCopy() *ControlPlaneMachineSetTemplateObjectMeta { + if in == nil { + return nil + } + out := new(ControlPlaneMachineSetTemplateObjectMeta) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataDiskProperties) DeepCopyInto(out *DataDiskProperties) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataDiskProperties. +func (in *DataDiskProperties) DeepCopy() *DataDiskProperties { + if in == nil { + return nil + } + out := new(DataDiskProperties) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FailureDomains) DeepCopyInto(out *FailureDomains) { + *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new([]AWSFailureDomain) + if **in != nil { + in, out := *in, *out + *out = make([]AWSFailureDomain, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new([]AzureFailureDomain) + if **in != nil { + in, out := *in, *out + *out = make([]AzureFailureDomain, len(*in)) + copy(*out, *in) + } + } + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new([]GCPFailureDomain) + if **in != nil { + in, out := *in, *out + *out = make([]GCPFailureDomain, len(*in)) + copy(*out, *in) + } + } + if in.OpenStack != nil { + in, out := &in.OpenStack, &out.OpenStack + *out = make([]OpenStackFailureDomain, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailureDomains. +func (in *FailureDomains) DeepCopy() *FailureDomains { + if in == nil { + return nil + } + out := new(FailureDomains) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPFailureDomain) DeepCopyInto(out *GCPFailureDomain) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPFailureDomain. +func (in *GCPFailureDomain) DeepCopy() *GCPFailureDomain { + if in == nil { + return nil + } + out := new(GCPFailureDomain) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBalancerReference) DeepCopyInto(out *LoadBalancerReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerReference. +func (in *LoadBalancerReference) DeepCopy() *LoadBalancerReference { + if in == nil { + return nil + } + out := new(LoadBalancerReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixCategory) DeepCopyInto(out *NutanixCategory) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixCategory. +func (in *NutanixCategory) DeepCopy() *NutanixCategory { + if in == nil { + return nil + } + out := new(NutanixCategory) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NutanixMachineProviderConfig) DeepCopyInto(out *NutanixMachineProviderConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Cluster.DeepCopyInto(&out.Cluster) + in.Image.DeepCopyInto(&out.Image) + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]NutanixResourceIdentifier, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.MemorySize = in.MemorySize.DeepCopy() + out.SystemDiskSize = in.SystemDiskSize.DeepCopy() + in.Project.DeepCopyInto(&out.Project) + if in.Categories != nil { + in, out := &in.Categories, &out.Categories + *out = make([]NutanixCategory, len(*in)) + copy(*out, *in) + } + if in.UserDataSecret != nil { + in, out := &in.UserDataSecret, &out.UserDataSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.CredentialsSecret != nil { + in, out := &in.CredentialsSecret, &out.CredentialsSecret + *out = new(corev1.LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixMachineProviderConfig. +func (in *NutanixMachineProviderConfig) DeepCopy() *NutanixMachineProviderConfig { + if in == nil { + return nil + } + out := new(NutanixMachineProviderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NutanixMachineProviderConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixMachineProviderStatus) DeepCopyInto(out *NutanixMachineProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VmUUID != nil { + in, out := &in.VmUUID, &out.VmUUID + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixMachineProviderStatus. +func (in *NutanixMachineProviderStatus) DeepCopy() *NutanixMachineProviderStatus { + if in == nil { + return nil + } + out := new(NutanixMachineProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NutanixMachineProviderStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NutanixResourceIdentifier) DeepCopyInto(out *NutanixResourceIdentifier) { + *out = *in + if in.UUID != nil { + in, out := &in.UUID, &out.UUID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NutanixResourceIdentifier. +func (in *NutanixResourceIdentifier) DeepCopy() *NutanixResourceIdentifier { + if in == nil { + return nil + } + out := new(NutanixResourceIdentifier) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *OpenShiftMachineV1Beta1MachineTemplate) DeepCopyInto(out *OpenShiftMachineV1Beta1MachineTemplate) { + *out = *in + in.FailureDomains.DeepCopyInto(&out.FailureDomains) + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftMachineV1Beta1MachineTemplate. +func (in *OpenShiftMachineV1Beta1MachineTemplate) DeepCopy() *OpenShiftMachineV1Beta1MachineTemplate { + if in == nil { + return nil + } + out := new(OpenShiftMachineV1Beta1MachineTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenStackFailureDomain) DeepCopyInto(out *OpenStackFailureDomain) { + *out = *in + if in.RootVolume != nil { + in, out := &in.RootVolume, &out.RootVolume + *out = new(RootVolume) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenStackFailureDomain. +func (in *OpenStackFailureDomain) DeepCopy() *OpenStackFailureDomain { + if in == nil { + return nil + } + out := new(OpenStackFailureDomain) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PowerVSMachineProviderConfig) DeepCopyInto(out *PowerVSMachineProviderConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.UserDataSecret != nil { + in, out := &in.UserDataSecret, &out.UserDataSecret + *out = new(PowerVSSecretReference) + **out = **in + } + if in.CredentialsSecret != nil { + in, out := &in.CredentialsSecret, &out.CredentialsSecret + *out = new(PowerVSSecretReference) + **out = **in + } + in.ServiceInstance.DeepCopyInto(&out.ServiceInstance) + in.Image.DeepCopyInto(&out.Image) + in.Network.DeepCopyInto(&out.Network) + out.Processors = in.Processors + if in.LoadBalancers != nil { + in, out := &in.LoadBalancers, &out.LoadBalancers + *out = make([]LoadBalancerReference, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSMachineProviderConfig. +func (in *PowerVSMachineProviderConfig) DeepCopy() *PowerVSMachineProviderConfig { + if in == nil { + return nil + } + out := new(PowerVSMachineProviderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PowerVSMachineProviderConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PowerVSMachineProviderStatus) DeepCopyInto(out *PowerVSMachineProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.ServiceInstanceID != nil { + in, out := &in.ServiceInstanceID, &out.ServiceInstanceID + *out = new(string) + **out = **in + } + if in.InstanceState != nil { + in, out := &in.InstanceState, &out.InstanceState + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSMachineProviderStatus. +func (in *PowerVSMachineProviderStatus) DeepCopy() *PowerVSMachineProviderStatus { + if in == nil { + return nil + } + out := new(PowerVSMachineProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PowerVSMachineProviderStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PowerVSResource) DeepCopyInto(out *PowerVSResource) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = new(string) + **out = **in + } + if in.RegEx != nil { + in, out := &in.RegEx, &out.RegEx + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSResource. +func (in *PowerVSResource) DeepCopy() *PowerVSResource { + if in == nil { + return nil + } + out := new(PowerVSResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PowerVSSecretReference) DeepCopyInto(out *PowerVSSecretReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PowerVSSecretReference. +func (in *PowerVSSecretReference) DeepCopy() *PowerVSSecretReference { + if in == nil { + return nil + } + out := new(PowerVSSecretReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RootVolume) DeepCopyInto(out *RootVolume) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootVolume. +func (in *RootVolume) DeepCopy() *RootVolume { + if in == nil { + return nil + } + out := new(RootVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SystemDiskProperties) DeepCopyInto(out *SystemDiskProperties) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SystemDiskProperties. 
+func (in *SystemDiskProperties) DeepCopy() *SystemDiskProperties { + if in == nil { + return nil + } + out := new(SystemDiskProperties) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Tag) DeepCopyInto(out *Tag) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Tag. +func (in *Tag) DeepCopy() *Tag { + if in == nil { + return nil + } + out := new(Tag) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..03f4f8267 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,411 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_AlibabaCloudMachineProviderConfig = map[string]string{ + "": "AlibabaCloudMachineProviderConfig is the Schema for the alibabacloudmachineproviderconfig API Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "instanceType": "The instance type of the instance.", + "vpcId": "The ID of the vpc", + "regionId": "The ID of the region in which to create the instance. You can call the DescribeRegions operation to query the most recent region list.", + "zoneId": "The ID of the zone in which to create the instance. You can call the DescribeZones operation to query the most recent region list.", + "imageId": "The ID of the image used to create the instance.", + "dataDisk": "DataDisks holds information regarding the extra disks attached to the instance", + "securityGroups": "SecurityGroups is a list of security group references to assign to the instance. A reference holds either the security group ID, the resource name, or the required tags to search. When more than one security group is returned for a tag search, all the groups are associated with the instance up to the maximum number of security groups to which an instance can belong. For more information, see the \"Security group limits\" section in Limits. https://www.alibabacloud.com/help/en/doc-detail/25412.htm", + "bandwidth": "Bandwidth describes the internet bandwidth strategy for the instance", + "systemDisk": "SystemDisk holds the properties regarding the system disk for the instance", + "vSwitch": "VSwitch is a reference to the vswitch to use for this instance. A reference holds either the vSwitch ID, the resource name, or the required tags to search. When more than one vSwitch is returned for a tag search, only the first vSwitch returned will be used. 
This parameter is required when you create an instance of the VPC type. You can call the DescribeVSwitches operation to query the created vSwitches.",
+	"ramRoleName": "RAMRoleName is the name of the instance Resource Access Management (RAM) role. This allows the instance to perform API calls as this specified RAM role.",
+	"resourceGroup": "ResourceGroup references the resource group to which to assign the instance. A reference holds either the resource group ID, the resource name, or the required tags to search. When more than one resource group is returned for a search, an error will be produced and the Machine will not be created. Resource Groups do not support searching by tags.",
+	"tenancy": "Tenancy specifies whether to create the instance on a dedicated host. Valid values:\n\ndefault: creates the instance on a non-dedicated host. host: creates the instance on a dedicated host. If you do not specify the DedicatedHostID parameter, Alibaba Cloud automatically selects a dedicated host for the instance. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is `default`.",
+	"userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance",
+	"credentialsSecret": "CredentialsSecret is a reference to the secret with alibabacloud credentials. Otherwise, defaults to permissions provided by attached RAM role where the actuator is running.",
+	"tag": "Tags are the set of metadata to add to an instance.",
+}
+
+func (AlibabaCloudMachineProviderConfig) SwaggerDoc() map[string]string {
+	return map_AlibabaCloudMachineProviderConfig
+}
+
+var map_AlibabaCloudMachineProviderConfigList = map[string]string{
+	"": "AlibabaCloudMachineProviderConfigList contains a list of AlibabaCloudMachineProviderConfig Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (AlibabaCloudMachineProviderConfigList) SwaggerDoc() map[string]string {
+	return map_AlibabaCloudMachineProviderConfigList
+}
+
+var map_AlibabaCloudMachineProviderStatus = map[string]string{
+	"": "AlibabaCloudMachineProviderStatus is the Schema for the alibabacloudmachineproviderconfig API Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"instanceId": "InstanceID is the instance ID of the machine created in alibabacloud",
+	"instanceState": "InstanceState is the state of the alibabacloud instance for this machine",
+	"conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status",
+}
+
+func (AlibabaCloudMachineProviderStatus) SwaggerDoc() map[string]string {
+	return map_AlibabaCloudMachineProviderStatus
+}
+
+var map_AlibabaResourceReference = map[string]string{
+	"": "AlibabaResourceReference is a reference to a specific AlibabaCloud resource by ID, or tags. Only one of ID or Tags may be specified. Specifying more than one will result in a validation error.",
+	"type": "type identifies the resource reference type for this entry.",
+	"id": "ID of resource",
+	"name": "Name of the resource",
+	"tags": "Tags is a set of metadata based upon ECS object tags used to identify a resource. For details about usage when multiple resources are found, please see the owning parent field documentation.",
+}
+
+func (AlibabaResourceReference) SwaggerDoc() map[string]string {
+	return map_AlibabaResourceReference
+}
+
+var map_BandwidthProperties = map[string]string{
+	"": "Bandwidth describes the bandwidth strategy for the network of the instance",
+	"internetMaxBandwidthIn": "InternetMaxBandwidthIn is the maximum inbound public bandwidth. Unit: Mbit/s. Valid values: When the purchased outbound public bandwidth is less than or equal to 10 Mbit/s, the valid values of this parameter are 1 to 10. Currently the default is `10` when outbound bandwidth is less than or equal to 10 Mbit/s. When the purchased outbound public bandwidth is greater than 10, the valid values are 1 to the InternetMaxBandwidthOut value. Currently the default is the value used for `InternetMaxBandwidthOut` when outbound public bandwidth is greater than 10.",
+	"internetMaxBandwidthOut": "InternetMaxBandwidthOut is the maximum outbound public bandwidth. Unit: Mbit/s. Valid values: 0 to 100. When a value greater than 0 is used then a public IP address is assigned to the instance. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is `0`",
+}
+
+func (BandwidthProperties) SwaggerDoc() map[string]string {
+	return map_BandwidthProperties
+}
+
+var map_DataDiskProperties = map[string]string{
+	"": "DataDisk contains the information regarding the datadisk attached to an instance",
+	"Name": "Name is the name of data disk N. If the name is specified the name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-).\n\nEmpty value means the platform chooses a default, which is subject to change over time. Currently the default is `\"\"`.",
+	"SnapshotID": "SnapshotID is the ID of the snapshot used to create data disk N. Valid values of N: 1 to 16.\n\nWhen the DataDisk.N.SnapshotID parameter is specified, the DataDisk.N.Size parameter is ignored. The data disk is created based on the size of the specified snapshot. Use snapshots created after July 15, 2013. Otherwise, an error is returned and your request is rejected.",
+	"Size": "Size of the data disk N. Valid values of N: 1 to 16. Unit: GiB. Valid values:\n\nValid values when DataDisk.N.Category is set to cloud_efficiency: 20 to 32768 Valid values when DataDisk.N.Category is set to cloud_ssd: 20 to 32768 Valid values when DataDisk.N.Category is set to cloud_essd: 20 to 32768 Valid values when DataDisk.N.Category is set to cloud: 5 to 2000 The value of this parameter must be greater than or equal to the size of the snapshot specified by the SnapshotID parameter.",
+	"DiskEncryption": "DiskEncryption specifies whether to encrypt data disk N.\n\nEmpty value means the platform chooses a default, which is subject to change over time. Currently the default is `disabled`.",
+	"PerformanceLevel": "PerformanceLevel is the performance level of the ESSD used as data disk N. The N value must be the same as that in DataDisk.N.Category when DataDisk.N.Category is set to cloud_essd. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is `PL1`. Valid values:\n\nPL0: A single ESSD can deliver up to 10,000 random read/write IOPS. PL1: A single ESSD can deliver up to 50,000 random read/write IOPS. PL2: A single ESSD can deliver up to 100,000 random read/write IOPS. PL3: A single ESSD can deliver up to 1,000,000 random read/write IOPS. For more information about ESSD performance levels, see ESSDs.",
+	"Category": "Category describes the type of data disk N. Valid values: cloud_efficiency: ultra disk cloud_ssd: standard SSD cloud_essd: ESSD cloud: basic disk Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently for non-I/O optimized instances of retired instance types, the default is `cloud`. Currently for other instances, the default is `cloud_efficiency`.",
+	"KMSKeyID": "KMSKeyID is the ID of the Key Management Service (KMS) key to be used by data disk N. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is `\"\"` which is interpreted as do not use KMSKey encryption.",
+	"DiskPreservation": "DiskPreservation specifies whether to release data disk N along with the instance. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is `DeleteWithInstance`",
+}
+
+func (DataDiskProperties) SwaggerDoc() map[string]string {
+	return map_DataDiskProperties
+}
+
+var map_SystemDiskProperties = map[string]string{
+	"": "SystemDiskProperties contains the information regarding the system disk including performance, size, name, and category",
+	"category": "Category is the category of the system disk. Valid values: cloud_essd: ESSD. When the parameter is set to this value, you can use the SystemDisk.PerformanceLevel parameter to specify the performance level of the disk. cloud_efficiency: ultra disk. cloud_ssd: standard SSD. cloud: basic disk. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently for non-I/O optimized instances of retired instance types, the default is `cloud`. Currently for other instances, the default is `cloud_efficiency`.",
+	"performanceLevel": "PerformanceLevel is the performance level of the ESSD used as the system disk. Valid values:\n\nPL0: A single ESSD can deliver up to 10,000 random read/write IOPS. PL1: A single ESSD can deliver up to 50,000 random read/write IOPS. PL2: A single ESSD can deliver up to 100,000 random read/write IOPS. PL3: A single ESSD can deliver up to 1,000,000 random read/write IOPS. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is `PL1`. For more information about ESSD performance levels, see ESSDs.",
+	"name": "Name is the name of the system disk. If the name is specified the name must be 2 to 128 characters in length. It must start with a letter and cannot start with http:// or https://. It can contain letters, digits, colons (:), underscores (_), and hyphens (-). Empty value means the platform chooses a default, which is subject to change over time. Currently the default is `\"\"`.",
+	"size": "Size is the size of the system disk. Unit: GiB. Valid values: 20 to 500. The value must be at least 20 and greater than or equal to the size of the image.
Empty value means the platform chooses a default, which is subject to change over time. Currently the default is `40` or the size of the image depending on whichever is greater.", +} + +func (SystemDiskProperties) SwaggerDoc() map[string]string { + return map_SystemDiskProperties +} + +var map_Tag = map[string]string{ + "": "Tag The tags of ECS Instance", + "Key": "Key is the name of the key pair", + "Value": "Value is the value or data of the key pair", +} + +func (Tag) SwaggerDoc() map[string]string { + return map_Tag +} + +var map_AWSResourceFilter = map[string]string{ + "": "AWSResourceFilter is a filter used to identify an AWS resource", + "name": "Name of the filter. Filter names are case-sensitive.", + "values": "Values includes one or more filter values. Filter values are case-sensitive.", +} + +func (AWSResourceFilter) SwaggerDoc() map[string]string { + return map_AWSResourceFilter +} + +var map_AWSResourceReference = map[string]string{ + "": "AWSResourceReference is a reference to a specific AWS resource by ID, ARN, or filters. Only one of ID, ARN or Filters may be specified. Specifying more than one will result in a validation error.", + "type": "Type determines how the reference will fetch the AWS resource.", + "id": "ID of resource.", + "arn": "ARN of resource.", + "filters": "Filters is a set of filters used to identify a resource.", +} + +func (AWSResourceReference) SwaggerDoc() map[string]string { + return map_AWSResourceReference +} + +var map_AWSFailureDomain = map[string]string{ + "": "AWSFailureDomain configures failure domain information for the AWS platform.", + "subnet": "Subnet is a reference to the subnet to use for this instance.", + "placement": "Placement configures the placement information for this instance.", +} + +func (AWSFailureDomain) SwaggerDoc() map[string]string { + return map_AWSFailureDomain +} + +var map_AWSFailureDomainPlacement = map[string]string{ + "": "AWSFailureDomainPlacement configures the placement information for the AWSFailureDomain.", + "availabilityZone": "AvailabilityZone is the availability zone of the instance.", +} + +func (AWSFailureDomainPlacement) SwaggerDoc() map[string]string { + return map_AWSFailureDomainPlacement +} + +var map_AzureFailureDomain = map[string]string{ + "": "AzureFailureDomain configures failure domain information for the Azure platform.", + "zone": "Availability Zone for the virtual machine. If nil, the virtual machine should be deployed to no zone.", +} + +func (AzureFailureDomain) SwaggerDoc() map[string]string { + return map_AzureFailureDomain +} + +var map_ControlPlaneMachineSet = map[string]string{ + "": "ControlPlaneMachineSet ensures that a specified number of control plane machine replicas are running at any given time. Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ControlPlaneMachineSet) SwaggerDoc() map[string]string { + return map_ControlPlaneMachineSet +} + +var map_ControlPlaneMachineSetList = map[string]string{ + "": "ControlPlaneMachineSetList contains a list of ControlPlaneMachineSet Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ControlPlaneMachineSetList) SwaggerDoc() map[string]string {
+	return map_ControlPlaneMachineSetList
+}
+
+var map_ControlPlaneMachineSetSpec = map[string]string{
+	"": "ControlPlaneMachineSetSpec represents the configuration of the ControlPlaneMachineSet.",
+	"state": "State defines whether the ControlPlaneMachineSet is Active or Inactive. When Inactive, the ControlPlaneMachineSet will not take any action on the state of the Machines within the cluster. When Active, the ControlPlaneMachineSet will reconcile the Machines and will update the Machines as necessary. Once Active, a ControlPlaneMachineSet cannot be made Inactive. To prevent further action please remove the ControlPlaneMachineSet.",
+	"replicas": "Replicas defines how many Control Plane Machines should be created by this ControlPlaneMachineSet. This field is immutable and cannot be changed after cluster installation. The ControlPlaneMachineSet only operates with 3 or 5 node control planes; 3 and 5 are the only valid values for this field.",
+	"strategy": "Strategy defines how the ControlPlaneMachineSet will update Machines when it detects a change to the ProviderSpec.",
+	"selector": "Label selector for Machines. Existing Machines selected by this selector will be the ones affected by this ControlPlaneMachineSet. It must match the template's labels. This field is considered immutable after creation of the resource.",
+	"template": "Template describes the Control Plane Machines that will be created by this ControlPlaneMachineSet.",
+}
+
+func (ControlPlaneMachineSetSpec) SwaggerDoc() map[string]string {
+	return map_ControlPlaneMachineSetSpec
+}
+
+var map_ControlPlaneMachineSetStatus = map[string]string{
+	"": "ControlPlaneMachineSetStatus represents the status of the ControlPlaneMachineSet CRD.",
+	"conditions": "Conditions represents the observations of the ControlPlaneMachineSet's current state. Known .status.conditions.type are: Available, Degraded and Progressing.",
+	"observedGeneration": "ObservedGeneration is the most recent generation observed for this ControlPlaneMachineSet. It corresponds to the ControlPlaneMachineSet's generation, which is updated on mutation by the API Server.",
+	"replicas": "Replicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller. Note that during update operations this value may differ from the desired replica count.",
+	"readyReplicas": "ReadyReplicas is the number of Control Plane Machines created by the ControlPlaneMachineSet controller which are ready. Note that this value may be higher than the desired number of replicas while rolling updates are in-progress.",
+	"updatedReplicas": "UpdatedReplicas is the number of non-terminated Control Plane Machines created by the ControlPlaneMachineSet controller that have the desired provider spec and are ready. This value is set to 0 when a change is detected to the desired spec. When the update strategy is RollingUpdate, this will also coincide with starting the process of updating the Machines. When the update strategy is OnDelete, this value will remain at 0 until a user deletes an existing replica and its replacement has become ready.",
+	"unavailableReplicas": "UnavailableReplicas is the number of Control Plane Machines that are still required before the ControlPlaneMachineSet reaches the desired available capacity.
When this value is non-zero, the number of ReadyReplicas is less than the desired Replicas.", +} + +func (ControlPlaneMachineSetStatus) SwaggerDoc() map[string]string { + return map_ControlPlaneMachineSetStatus +} + +var map_ControlPlaneMachineSetStrategy = map[string]string{ + "": "ControlPlaneMachineSetStrategy defines the strategy for applying updates to the Control Plane Machines managed by the ControlPlaneMachineSet.", + "type": "Type defines the type of update strategy that should be used when updating Machines owned by the ControlPlaneMachineSet. Valid values are \"RollingUpdate\" and \"OnDelete\". The current default value is \"RollingUpdate\".", +} + +func (ControlPlaneMachineSetStrategy) SwaggerDoc() map[string]string { + return map_ControlPlaneMachineSetStrategy +} + +var map_ControlPlaneMachineSetTemplate = map[string]string{ + "": "ControlPlaneMachineSetTemplate is a template used by the ControlPlaneMachineSet to create the Machines that it will manage in the future. ", + "machineType": "MachineType determines the type of Machines that should be managed by the ControlPlaneMachineSet. Currently, the only valid value is machines_v1beta1_machine_openshift_io.", + "machines_v1beta1_machine_openshift_io": "OpenShiftMachineV1Beta1Machine defines the template for creating Machines from the v1beta1.machine.openshift.io API group.", +} + +func (ControlPlaneMachineSetTemplate) SwaggerDoc() map[string]string { + return map_ControlPlaneMachineSetTemplate +} + +var map_ControlPlaneMachineSetTemplateObjectMeta = map[string]string{ + "": "ControlPlaneMachineSetTemplateObjectMeta is a subset of the metav1.ObjectMeta struct. It allows users to specify labels and annotations that will be copied onto Machines created from this template.", + "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels. This field must contain both the 'machine.openshift.io/cluster-api-machine-role' and 'machine.openshift.io/cluster-api-machine-type' labels, both with a value of 'master'. It must also contain a label with the key 'machine.openshift.io/cluster-api-cluster'.", + "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", +} + +func (ControlPlaneMachineSetTemplateObjectMeta) SwaggerDoc() map[string]string { + return map_ControlPlaneMachineSetTemplateObjectMeta +} + +var map_FailureDomains = map[string]string{ + "": "FailureDomain represents the different configurations required to spread Machines across failure domains on different platforms.", + "platform": "Platform identifies the platform for which the FailureDomain represents. 
Currently supported values are AWS, Azure, and GCP.",
+	"aws": "AWS configures failure domain information for the AWS platform.",
+	"azure": "Azure configures failure domain information for the Azure platform.",
+	"gcp": "GCP configures failure domain information for the GCP platform.",
+	"openstack": "OpenStack configures failure domain information for the OpenStack platform.",
+}
+
+func (FailureDomains) SwaggerDoc() map[string]string {
+	return map_FailureDomains
+}
+
+var map_GCPFailureDomain = map[string]string{
+	"": "GCPFailureDomain configures failure domain information for the GCP platform",
+	"zone": "Zone is the zone in which the GCP machine provider will create the VM.",
+}
+
+func (GCPFailureDomain) SwaggerDoc() map[string]string {
+	return map_GCPFailureDomain
+}
+
+var map_OpenShiftMachineV1Beta1MachineTemplate = map[string]string{
+	"": "OpenShiftMachineV1Beta1MachineTemplate is a template for the ControlPlaneMachineSet to create Machines from the v1beta1.machine.openshift.io API group.",
+	"failureDomains": "FailureDomains is the list of failure domains (sometimes called availability zones) in which the ControlPlaneMachineSet should balance the Control Plane Machines. This will be merged into the ProviderSpec given in the template. This field is optional on platforms that do not require placement information.",
+	"metadata": "ObjectMeta is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata Labels are required to match the ControlPlaneMachineSet selector.",
+	"spec": "Spec contains the desired configuration of the Control Plane Machines. The ProviderSpec within contains platform specific details for creating the Control Plane Machines. The ProviderSpec should be complete apart from the platform specific failure domain field. This will be overridden when the Machines are created based on the FailureDomains field.",
+}
+
+func (OpenShiftMachineV1Beta1MachineTemplate) SwaggerDoc() map[string]string {
+	return map_OpenShiftMachineV1Beta1MachineTemplate
+}
+
+var map_OpenStackFailureDomain = map[string]string{
+	"": "OpenStackFailureDomain configures failure domain information for the OpenStack platform.",
+	"availabilityZone": "availabilityZone is the nova availability zone in which the OpenStack machine provider will create the VM. If not specified, the VM will be created in the default availability zone specified in the nova configuration. Availability zone names must NOT contain : since it is used by admin users to specify hosts where instances are launched in server creation. Also, it must not contain spaces, otherwise nodes that belong to this availability zone will fail to register; see kubernetes/cloud-provider-openstack#1379 for further information. The maximum length of availability zone name is 63 as per labels limits.",
+	"rootVolume": "rootVolume contains settings that will be used by the OpenStack machine provider to create the root volume attached to the VM. If not specified, no root volume will be created.",
+}
+
+func (OpenStackFailureDomain) SwaggerDoc() map[string]string {
+	return map_OpenStackFailureDomain
+}
+
+var map_RootVolume = map[string]string{
+	"": "RootVolume represents the volume metadata to boot from. The original RootVolume struct is defined in the v1alpha1 but it's not best practice to use it directly here so we define a new one that should stay in sync with the original one.",
+	"availabilityZone": "availabilityZone specifies the Cinder availability zone where the root volume will be created. If not specified, the root volume will be created in the availability zone specified by the volume type in the cinder configuration. If the volume type (configured in the OpenStack cluster) does not specify an availability zone, the root volume will be created in the default availability zone specified in the cinder configuration. See https://docs.openstack.org/cinder/latest/admin/availability-zone-type.html for more details. If the OpenStack cluster is deployed with the cross_az_attach configuration option set to false, the root volume will have to be in the same availability zone as the VM (defined by OpenStackFailureDomain.AvailabilityZone). Availability zone names must NOT contain spaces, otherwise volumes that belong to this availability zone will fail to register; see kubernetes/cloud-provider-openstack#1379 for further information. The maximum length of availability zone name is 63 as per labels limits.",
+	"volumeType": "volumeType specifies the type of the root volume that will be provisioned. The maximum length of a volume type name is 255 characters, as per the OpenStack limit.",
+}
+
+func (RootVolume) SwaggerDoc() map[string]string {
+	return map_RootVolume
+}
+
+var map_NutanixCategory = map[string]string{
+	"": "NutanixCategory identifies a pair of prism category key and value",
+	"key": "key is the prism category key name",
+	"value": "value is the prism category value associated with the key",
+}
+
+func (NutanixCategory) SwaggerDoc() map[string]string {
+	return map_NutanixCategory
+}
+
+var map_NutanixMachineProviderConfig = map[string]string{
+	"": "NutanixMachineProviderConfig is the Schema for the nutanixmachineproviderconfigs API Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"cluster": "cluster is to identify the cluster (the Prism Element under management of the Prism Central), in which the Machine's VM will be created. The cluster identifier (uuid or name) can be obtained from the Prism Central console or using the prism_central API.",
+	"image": "image is to identify the rhcos image uploaded to the Prism Central (PC). The image identifier (uuid or name) can be obtained from the Prism Central console or using the prism_central API.",
+	"subnets": "subnets holds a list of identifiers (one or more) of the cluster's network subnets for the Machine's VM to connect to. The subnet identifiers (uuid or name) can be obtained from the Prism Central console or using the prism_central API.",
+	"vcpusPerSocket": "vcpusPerSocket is the number of vCPUs per socket of the VM",
+	"vcpuSockets": "vcpuSockets is the number of vCPU sockets of the VM",
+	"memorySize": "memorySize is the memory size (in Quantity format) of the VM. The minimum memorySize is 2Gi bytes",
+	"systemDiskSize": "systemDiskSize is size (in Quantity format) of the system disk of the VM. The minimum systemDiskSize is 20Gi bytes",
+	"bootType": "bootType indicates the boot type (Legacy, UEFI or SecureBoot) the Machine's VM uses to boot.
If this field is empty or omitted, the VM will use the default boot type \"Legacy\" to boot. \"SecureBoot\" depends on \"UEFI\" boot, i.e., enabling \"SecureBoot\" means that \"UEFI\" boot is also enabled.", + "project": "project optionally identifies a Prism project for the Machine's VM to associate with.", + "categories": "categories optionally adds one or more prism categories (each with key and value) for the Machine's VM to associate with. All the category key and value pairs specified must already exist in the prism central.", + "userDataSecret": "userDataSecret is a local reference to a secret that contains the UserData to apply to the VM", + "credentialsSecret": "credentialsSecret is a local reference to a secret that contains the credentials data to access Nutanix PC client", +} + +func (NutanixMachineProviderConfig) SwaggerDoc() map[string]string { + return map_NutanixMachineProviderConfig +} + +var map_NutanixMachineProviderStatus = map[string]string{ + "": "NutanixMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains nutanix-specific status information. Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "conditions": "conditions is a set of conditions associated with the Machine to indicate errors or other status", + "vmUUID": "vmUUID is the Machine associated VM's UUID The field is missing before the VM is created. Once the VM is created, the field is filled with the VM's UUID and it will not change. The vmUUID is used to find the VM when updating the Machine status, and to delete the VM when the Machine is deleted.", +} + +func (NutanixMachineProviderStatus) SwaggerDoc() map[string]string { + return map_NutanixMachineProviderStatus +} + +var map_NutanixResourceIdentifier = map[string]string{ + "": "NutanixResourceIdentifier holds the identity of a Nutanix PC resource (cluster, image, subnet, etc.)", + "type": "Type is the identifier type to use for this resource.", + "uuid": "uuid is the UUID of the resource in the PC.", + "name": "name is the resource name in the PC", +} + +func (NutanixResourceIdentifier) SwaggerDoc() map[string]string { + return map_NutanixResourceIdentifier +} + +var map_LoadBalancerReference = map[string]string{ + "": "LoadBalancerReference is a reference to a load balancer on IBM Cloud virtual private cloud(VPC).", + "name": "name of the LoadBalancer in IBM Cloud VPC. The name should be between 1 and 63 characters long and may consist of lowercase alphanumeric characters and hyphens only. The value must not end with a hyphen. It is a reference to existing LoadBalancer created by openshift installer component.", + "type": "type of the LoadBalancer service supported by IBM Cloud VPC. Currently, only Application LoadBalancer is supported. More details about Application LoadBalancer https://cloud.ibm.com/docs/vpc?topic=vpc-load-balancers-about&interface=ui Supported values are Application.", +} + +func (LoadBalancerReference) SwaggerDoc() map[string]string { + return map_LoadBalancerReference +} + +var map_PowerVSMachineProviderConfig = map[string]string{ + "": "PowerVSMachineProviderConfig is the type that will be embedded in a Machine.Spec.ProviderSpec field for a PowerVS virtual machine. 
It is used by the PowerVS machine actuator to create a single Machine.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "userDataSecret": "userDataSecret contains a local reference to a secret that contains the UserData to apply to the instance.", + "credentialsSecret": "credentialsSecret is a reference to the secret with IBM Cloud credentials.", + "serviceInstance": "serviceInstance is the reference to the Power VS service on which the server instance(VM) will be created. Power VS service is a container for all Power VS instances at a specific geographic region. serviceInstance can be created via IBM Cloud catalog or CLI. supported serviceInstance identifier in PowerVSResource are Name and ID and that can be obtained from IBM Cloud UI or IBM Cloud cli. More detail about Power VS service instance. https://cloud.ibm.com/docs/power-iaas?topic=power-iaas-creating-power-virtual-server", + "image": "image is to identify the rhcos image uploaded to IBM COS bucket which is used to create the instance. supported image identifier in PowerVSResource are Name and ID and that can be obtained from IBM Cloud UI or IBM Cloud cli.", + "network": "network is the reference to the Network to use for this instance. supported network identifier in PowerVSResource are Name, ID and RegEx and that can be obtained from IBM Cloud UI or IBM Cloud cli.", + "keyPairName": "keyPairName is the name of the KeyPair to use for SSH. The key pair will be exposed to the instance via the instance metadata service. On boot, the OS will copy the public keypair into the authorized keys for the core user.", + "systemType": "systemType is the System type used to host the instance. systemType determines the number of cores and memory that is available. Few of the supported SystemTypes are s922,e880,e980. e880 systemType available only in Dallas Datacenters. e980 systemType available in Datacenters except Dallas and Washington. When omitted, this means that the user has no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is s922 which is generally available.", + "processorType": "processorType is the VM instance processor type. It must be set to one of the following values: Dedicated, Capped or Shared. Dedicated: resources are allocated for a specific client, The hypervisor makes a 1:1 binding of a partition’s processor to a physical processor core. Shared: Shared among other clients. Capped: Shared, but resources do not expand beyond those that are requested, the amount of CPU time is Capped to the value specified for the entitlement. if the processorType is selected as Dedicated, then processors value cannot be fractional. When omitted, this means that the user has no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is Shared.", + "processors": "processors is the number of virtual processors in a virtual machine. when the processorType is selected as Dedicated the processors value cannot be fractional. maximum value for the Processors depends on the selected SystemType. when SystemType is set to e880 or e980 maximum Processors value is 143. when SystemType is set to s922 maximum Processors value is 15. minimum value for Processors depends on the selected ProcessorType. when ProcessorType is set as Shared or Capped, The minimum processors is 0.5. 
when ProcessorType is set as Dedicated, The minimum processors is 1. When omitted, this means that the user has no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The default is set based on the selected ProcessorType. when ProcessorType selected as Dedicated, the default is set to 1. when ProcessorType selected as Shared or Capped, the default is set to 0.5.", + "memoryGiB": "memoryGiB is the size of a virtual machine's memory, in GiB. maximum value for the MemoryGiB depends on the selected SystemType. when SystemType is set to e880 maximum MemoryGiB value is 7463 GiB. when SystemType is set to e980 maximum MemoryGiB value is 15307 GiB. when SystemType is set to s922 maximum MemoryGiB value is 942 GiB. The minimum memory is 32 GiB. When omitted, this means the user has no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is 32.", + "loadBalancers": "loadBalancers is the set of load balancers to which the new control plane instance should be added once it is created.", +} + +func (PowerVSMachineProviderConfig) SwaggerDoc() map[string]string { + return map_PowerVSMachineProviderConfig +} + +var map_PowerVSMachineProviderStatus = map[string]string{ + "": "PowerVSMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains PowerVS-specific status information.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "conditions": "conditions is a set of conditions associated with the Machine to indicate errors or other status", + "instanceId": "instanceId is the instance ID of the machine created in PowerVS instanceId uniquely identifies a Power VS server instance(VM) under a Power VS service. This will help in updating or deleting a VM in Power VS Cloud", + "serviceInstanceID": "serviceInstanceID is the reference to the Power VS ServiceInstance on which the machine instance will be created. serviceInstanceID uniquely identifies the Power VS service By setting serviceInstanceID it will become easy and efficient to fetch a server instance(VM) within Power VS Cloud.", + "instanceState": "instanceState is the state of the PowerVS instance for this machine Possible instance states are Active, Build, ShutOff, Reboot This is used to display additional information to user regarding instance current state", +} + +func (PowerVSMachineProviderStatus) SwaggerDoc() map[string]string { + return map_PowerVSMachineProviderStatus +} + +var map_PowerVSResource = map[string]string{ + "": "PowerVSResource is a reference to a specific PowerVS resource by ID, Name or RegEx Only one of ID, Name or RegEx may be specified. Specifying more than one will result in a validation error.", + "type": "Type identifies the resource type for this entry. 
Valid values are ID, Name and RegEx",
+	"id": "ID of resource",
+	"name": "Name of resource",
+	"regex": "Regex to find resource. Regex contains the pattern to match to find a resource",
+}
+
+func (PowerVSResource) SwaggerDoc() map[string]string {
+	return map_PowerVSResource
+}
+
+var map_PowerVSSecretReference = map[string]string{
+	"": "PowerVSSecretReference contains enough information to locate the referenced secret inside the same namespace.",
+	"name": "Name of the secret.",
+}
+
+func (PowerVSSecretReference) SwaggerDoc() map[string]string {
+	return map_PowerVSSecretReference
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
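+
+// exampleFieldDoc is an illustrative sketch, not part of the generated file:
+// each SwaggerDoc map is keyed by JSON field name, and the "" key holds the
+// type-level description, so field documentation can be looked up like this:
+func exampleFieldDoc() string {
+	docs := PowerVSMachineProviderConfig{}.SwaggerDoc()
+	return docs["serviceInstance"]
+}
diff --git a/vendor/github.com/openshift/api/machine/v1alpha1/doc.go b/vendor/github.com/openshift/api/machine/v1alpha1/doc.go
new file mode 100644
index 000000000..111cacb63
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1alpha1/doc.go
@@ -0,0 +1,7 @@
+// +k8s:deepcopy-gen=package,register
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:openapi-gen=true
+
+// +kubebuilder:validation:Optional
+// +groupName=machine.openshift.io
+package v1alpha1
diff --git a/vendor/github.com/openshift/api/machine/v1alpha1/register.go b/vendor/github.com/openshift/api/machine/v1alpha1/register.go
new file mode 100644
index 000000000..ef96c4720
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1alpha1/register.go
@@ -0,0 +1,38 @@
+/*
+ Copyright 2022 Red Hat, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.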
+*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// OpenstackProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field +// for an OpenStack Instance. It is used by the Openstack machine actuator to create a single machine instance. +// +k8s:openapi-gen=true +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type OpenstackProviderSpec struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // The name of the secret containing the openstack credentials + CloudsSecret *corev1.SecretReference `json:"cloudsSecret"` + + // The name of the cloud to use from the clouds secret + CloudName string `json:"cloudName"` + + // The flavor reference for the flavor for your server instance. + Flavor string `json:"flavor"` + + // The name of the image to use for your server instance. + // If the RootVolume is specified, this will be ignored and use rootVolume directly. + Image string `json:"image"` + + // The ssh key to inject in the instance + KeyName string `json:"keyName,omitempty"` + + // The machine ssh username + SshUserName string `json:"sshUserName,omitempty"` + + // A networks object. Required parameter when there are multiple networks defined for the tenant. + // When you do not specify the networks parameter, the server attaches to the only network created for the current tenant. + Networks []NetworkParam `json:"networks,omitempty"` + + // Create and assign additional ports to instances + Ports []PortOpts `json:"ports,omitempty"` + + // floatingIP specifies a floating IP to be associated with the machine. + // Note that it is not safe to use this parameter in a MachineSet, as + // only one Machine may be assigned the same floating IP. + // + // Deprecated: floatingIP will be removed in a future release as it cannot be implemented correctly. + FloatingIP string `json:"floatingIP,omitempty"` + + // The availability zone from which to launch the server. + AvailabilityZone string `json:"availabilityZone,omitempty"` + + // The names of the security groups to assign to the instance + SecurityGroups []SecurityGroupParam `json:"securityGroups,omitempty"` + + // The name of the secret containing the user data (startup script in most cases) + UserDataSecret *corev1.SecretReference `json:"userDataSecret,omitempty"` + + // Whether the server instance is created on a trunk port or not. + Trunk bool `json:"trunk,omitempty"` + + // Machine tags + // Requires Nova api 2.52 minimum! + Tags []string `json:"tags,omitempty"` + + // Metadata mapping. Allows you to create a map of key value pairs to add to the server instance. + ServerMetadata map[string]string `json:"serverMetadata,omitempty"` + + // Config Drive support + ConfigDrive *bool `json:"configDrive,omitempty"` + + // The volume metadata to boot from + RootVolume *RootVolume `json:"rootVolume,omitempty"` + + // The server group to assign the machine to. + ServerGroupID string `json:"serverGroupID,omitempty"` + + // The server group to assign the machine to. 
A server group with that + // name will be created if it does not exist. If both ServerGroupID and + // ServerGroupName are non-empty, they must refer to the same OpenStack + // resource. + ServerGroupName string `json:"serverGroupName,omitempty"` + + // The subnet that a set of machines will get ingress/egress traffic from + PrimarySubnet string `json:"primarySubnet,omitempty"` +} + +type SecurityGroupParam struct { + // Security Group UUID + UUID string `json:"uuid,omitempty"` + // Security Group name + Name string `json:"name,omitempty"` + // Filters used to query security groups in openstack + Filter SecurityGroupFilter `json:"filter,omitempty"` +} + +type SecurityGroupFilter struct { + // id specifies the ID of a security group to use. If set, id will not + // be validated before use. An invalid id will result in failure to + // create a server with an appropriate error message. + ID string `json:"id,omitempty"` + // name filters security groups by name. + Name string `json:"name,omitempty"` + // description filters security groups by description. + Description string `json:"description,omitempty"` + // tenantId filters security groups by tenant ID. + // Deprecated: use projectId instead. tenantId will be ignored if projectId is set. + TenantID string `json:"tenantId,omitempty"` + // projectId filters security groups by project ID. + ProjectID string `json:"projectId,omitempty"` + // tags filters by security groups containing all specified tags. + // Multiple tags are comma separated. + Tags string `json:"tags,omitempty"` + // tagsAny filters by security groups containing any specified tags. + // Multiple tags are comma separated. + TagsAny string `json:"tagsAny,omitempty"` + // notTags filters by security groups which don't match all specified tags. NOT (t1 AND t2...) + // Multiple tags are comma separated. + NotTags string `json:"notTags,omitempty"` + // notTagsAny filters by security groups which don't match any specified tags. NOT (t1 OR t2...) + // Multiple tags are comma separated. + NotTagsAny string `json:"notTagsAny,omitempty"` + + // Deprecated: limit is silently ignored. It has no replacement. + DeprecatedLimit int `json:"limit,omitempty"` + // Deprecated: marker is silently ignored. It has no replacement. + DeprecatedMarker string `json:"marker,omitempty"` + // Deprecated: sortKey is silently ignored. It has no replacement. + DeprecatedSortKey string `json:"sortKey,omitempty"` + // Deprecated: sortDir is silently ignored. It has no replacement. + DeprecatedSortDir string `json:"sortDir,omitempty"` +} + +type NetworkParam struct { + // The UUID of the network. Required if you omit the port attribute. + UUID string `json:"uuid,omitempty"` + // A fixed IPv4 address for the NIC. + FixedIp string `json:"fixedIp,omitempty"` + // Filters for optional network query + Filter Filter `json:"filter,omitempty"` + // Subnet within a network to use + Subnets []SubnetParam `json:"subnets,omitempty"` + // NoAllowedAddressPairs disables creation of allowed address pairs for the network ports + NoAllowedAddressPairs bool `json:"noAllowedAddressPairs,omitempty"` + // PortTags allows users to specify a list of tags to add to ports created in a given network + PortTags []string `json:"portTags,omitempty"` + // The virtual network interface card (vNIC) type that is bound to the + // neutron port. 
+ VNICType string `json:"vnicType,omitempty"` + // A dictionary that enables the application running on the specified + // host to pass and receive virtual network interface (VIF) port-specific + // information to the plug-in. + Profile map[string]string `json:"profile,omitempty"` + // PortSecurity optionally enables or disables security on ports managed by OpenStack + PortSecurity *bool `json:"portSecurity,omitempty"` +} + +type Filter struct { + // Deprecated: use NetworkParam.uuid instead. Ignored if NetworkParam.uuid is set. + ID string `json:"id,omitempty"` + // name filters networks by name. + Name string `json:"name,omitempty"` + // description filters networks by description. + Description string `json:"description,omitempty"` + // tenantId filters networks by tenant ID. + // Deprecated: use projectId instead. tenantId will be ignored if projectId is set. + TenantID string `json:"tenantId,omitempty"` + // projectId filters networks by project ID. + ProjectID string `json:"projectId,omitempty"` + // tags filters by networks containing all specified tags. + // Multiple tags are comma separated. + Tags string `json:"tags,omitempty"` + // tagsAny filters by networks containing any specified tags. + // Multiple tags are comma separated. + TagsAny string `json:"tagsAny,omitempty"` + // notTags filters by networks which don't match all specified tags. NOT (t1 AND t2...) + // Multiple tags are comma separated. + NotTags string `json:"notTags,omitempty"` + // notTagsAny filters by networks which don't match any specified tags. NOT (t1 OR t2...) + // Multiple tags are comma separated. + NotTagsAny string `json:"notTagsAny,omitempty"` + + // Deprecated: status is silently ignored. It has no replacement. + DeprecatedStatus string `json:"status,omitempty"` + // Deprecated: adminStateUp is silently ignored. It has no replacement. + DeprecatedAdminStateUp *bool `json:"adminStateUp,omitempty"` + // Deprecated: shared is silently ignored. It has no replacement. + DeprecatedShared *bool `json:"shared,omitempty"` + // Deprecated: marker is silently ignored. It has no replacement. + DeprecatedMarker string `json:"marker,omitempty"` + // Deprecated: limit is silently ignored. It has no replacement. + DeprecatedLimit int `json:"limit,omitempty"` + // Deprecated: sortKey is silently ignored. It has no replacement. + DeprecatedSortKey string `json:"sortKey,omitempty"` + // Deprecated: sortDir is silently ignored. It has no replacement. + DeprecatedSortDir string `json:"sortDir,omitempty"` +} + +type SubnetParam struct { + // The UUID of the network. Required if you omit the port attribute. + UUID string `json:"uuid,omitempty"` + + // Filters for optional network query + Filter SubnetFilter `json:"filter,omitempty"` + + // PortTags are tags that are added to ports created on this subnet + PortTags []string `json:"portTags,omitempty"` + + // PortSecurity optionally enables or disables security on ports managed by OpenStack + PortSecurity *bool `json:"portSecurity,omitempty"` +} + +type SubnetFilter struct { + // id is the uuid of a specific subnet to use. If specified, id will not + // be validated. Instead server creation will fail with an appropriate + // error. + ID string `json:"id,omitempty"` + // name filters subnets by name. + Name string `json:"name,omitempty"` + // description filters subnets by description. + Description string `json:"description,omitempty"` + // Deprecated: networkId is silently ignored. Set uuid on the containing network definition instead. 
+ NetworkID string `json:"networkId,omitempty"` + // tenantId filters subnets by tenant ID. + // Deprecated: use projectId instead. tenantId will be ignored if projectId is set. + TenantID string `json:"tenantId,omitempty"` + // projectId filters subnets by project ID. + ProjectID string `json:"projectId,omitempty"` + // ipVersion filters subnets by IP version. + IPVersion int `json:"ipVersion,omitempty"` + // gateway_ip filters subnets by gateway IP. + GatewayIP string `json:"gateway_ip,omitempty"` + // cidr filters subnets by CIDR. + CIDR string `json:"cidr,omitempty"` + // ipv6AddressMode filters subnets by IPv6 address mode. + IPv6AddressMode string `json:"ipv6AddressMode,omitempty"` + // ipv6RaMode filters subnets by IPv6 router advertisement mode. + IPv6RAMode string `json:"ipv6RaMode,omitempty"` + // subnetpoolId filters subnets by subnet pool ID. + SubnetPoolID string `json:"subnetpoolId,omitempty"` + // tags filters by subnets containing all specified tags. + // Multiple tags are comma separated. + Tags string `json:"tags,omitempty"` + // tagsAny filters by subnets containing any specified tags. + // Multiple tags are comma separated. + TagsAny string `json:"tagsAny,omitempty"` + // notTags filters by subnets which don't match all specified tags. NOT (t1 AND t2...) + // Multiple tags are comma separated. + NotTags string `json:"notTags,omitempty"` + // notTagsAny filters by subnets which don't match any specified tags. NOT (t1 OR t2...) + // Multiple tags are comma separated. + NotTagsAny string `json:"notTagsAny,omitempty"` + + // Deprecated: enableDhcp is silently ignored. It has no replacement. + DeprecatedEnableDHCP *bool `json:"enableDhcp,omitempty"` + // Deprecated: limit is silently ignored. It has no replacement. + DeprecatedLimit int `json:"limit,omitempty"` + // Deprecated: marker is silently ignored. It has no replacement. + DeprecatedMarker string `json:"marker,omitempty"` + // Deprecated: sortKey is silently ignored. It has no replacement. + DeprecatedSortKey string `json:"sortKey,omitempty"` + // Deprecated: sortDir is silently ignored. It has no replacement. + DeprecatedSortDir string `json:"sortDir,omitempty"` +} + +type PortOpts struct { + // networkID is the ID of the network the port will be created in. It is required. + // +required + NetworkID string `json:"networkID"` + // If nameSuffix is specified the created port will be named -. + // If not specified the port will be named -. + NameSuffix string `json:"nameSuffix,omitempty"` + // description specifies the description of the created port. + Description string `json:"description,omitempty"` + // adminStateUp sets the administrative state of the created port to up (true), or down (false). + AdminStateUp *bool `json:"adminStateUp,omitempty"` + // macAddress specifies the MAC address of the created port. + MACAddress string `json:"macAddress,omitempty"` + // fixedIPs specifies a set of fixed IPs to assign to the port. They must all be valid for the port's network. + FixedIPs []FixedIPs `json:"fixedIPs,omitempty"` + // tenantID specifies the tenant ID of the created port. Note that this + // requires OpenShift to have administrative permissions, which is + // typically not the case. Use of this field is not recommended. + // Deprecated: use projectID instead. It will be ignored if projectID is set. + TenantID string `json:"tenantID,omitempty"` + // projectID specifies the project ID of the created port.
Note that this + // requires OpenShift to have administrative permissions, which is + // typically not the case. Use of this field is not recommended. + ProjectID string `json:"projectID,omitempty"` + // securityGroups specifies a set of security group UUIDs to use instead + // of the machine's default security groups. The default security groups + // will be used if this is left empty or not specified. + SecurityGroups *[]string `json:"securityGroups,omitempty"` + // allowedAddressPairs specifies a set of allowed address pairs to add to the port. + AllowedAddressPairs []AddressPair `json:"allowedAddressPairs,omitempty"` + // tags specifies a set of tags to add to the port. + Tags []string `json:"tags,omitempty"` + // The virtual network interface card (vNIC) type that is bound to the + // neutron port. + VNICType string `json:"vnicType,omitempty"` + // A dictionary that enables the application running on the specified + // host to pass and receive virtual network interface (VIF) port-specific + // information to the plug-in. + Profile map[string]string `json:"profile,omitempty"` + // enable or disable security on a given port. + // Incompatible with securityGroups and allowedAddressPairs + PortSecurity *bool `json:"portSecurity,omitempty"` + // Enables and disables trunk at port level. If not provided, openStackMachine.Spec.Trunk is inherited. + Trunk *bool `json:"trunk,omitempty"` + + // The ID of the host where the port is allocated. Do not use this + // field: it cannot be used correctly. + // Deprecated: hostID is silently ignored. It will be removed with no replacement. + DeprecatedHostID string `json:"hostID,omitempty"` +} + +type AddressPair struct { + IPAddress string `json:"ipAddress,omitempty"` + MACAddress string `json:"macAddress,omitempty"` +} + +type FixedIPs struct { + // subnetID specifies the ID of the subnet where the fixed IP will be allocated. + SubnetID string `json:"subnetID"` + // ipAddress is a specific IP address to use in the given subnet. Port + // creation will fail if the address is not available. If not specified, + // an available IP from the given subnet will be selected automatically. + IPAddress string `json:"ipAddress,omitempty"` +} + +type RootVolume struct { + // sourceUUID specifies the UUID of a glance image used to populate the root volume. + // Deprecated: set image in the platform spec instead. This will be + // ignored if image is set in the platform spec. + SourceUUID string `json:"sourceUUID,omitempty"` + // volumeType specifies a volume type to use when creating the root + // volume. If not specified the default volume type will be used. + VolumeType string `json:"volumeType,omitempty"` + // diskSize specifies the size, in GB, of the created root volume. + Size int `json:"diskSize,omitempty"` + // availabilityZone specifies the Cinder availability zone where the root volume will be created. + Zone string `json:"availabilityZone,omitempty"` + + // Deprecated: sourceType will be silently ignored. There is no replacement. + DeprecatedSourceType string `json:"sourceType,omitempty"` + // Deprecated: deviceType will be silently ignored. There is no replacement. + DeprecatedDeviceType string `json:"deviceType,omitempty"` +}
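For orientation while reviewing the vendored types above, the sketch below populates an OpenstackProviderSpec the way a consumer would before embedding it in a Machine's providerSpec.value. It is illustrative only: the UUID, names, and sizes are hypothetical, and the import path assumes the vendored package location shown in this diff.

```go
package main

import (
	"encoding/json"
	"fmt"

	machinev1alpha1 "github.com/openshift/api/machine/v1alpha1"
)

func main() {
	spec := machinev1alpha1.OpenstackProviderSpec{
		Networks: []machinev1alpha1.NetworkParam{{
			UUID: "6c1b7a8e-0000-0000-0000-000000000000", // hypothetical network UUID
			Subnets: []machinev1alpha1.SubnetParam{{
				Filter: machinev1alpha1.SubnetFilter{Name: "nodes-subnet"}, // hypothetical subnet name
			}},
		}},
		RootVolume: &machinev1alpha1.RootVolume{
			VolumeType: "performance", // hypothetical Cinder volume type
			Size:       25,            // marshalled as diskSize (GB)
			Zone:       "nova",        // marshalled as availabilityZone
		},
		ServerGroupName: "worker-group", // created on demand if it does not exist
		Tags:            []string{"openshift-node"},
	}

	// Machine.Spec.ProviderSpec.Value carries this struct as raw JSON.
	raw, err := json.Marshal(&spec)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(raw))
}
```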
diff --git a/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..7210713e3 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,346 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddressPair) DeepCopyInto(out *AddressPair) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressPair. +func (in *AddressPair) DeepCopy() *AddressPair { + if in == nil { + return nil + } + out := new(AddressPair) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Filter) DeepCopyInto(out *Filter) { + *out = *in + if in.DeprecatedAdminStateUp != nil { + in, out := &in.DeprecatedAdminStateUp, &out.DeprecatedAdminStateUp + *out = new(bool) + **out = **in + } + if in.DeprecatedShared != nil { + in, out := &in.DeprecatedShared, &out.DeprecatedShared + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Filter. +func (in *Filter) DeepCopy() *Filter { + if in == nil { + return nil + } + out := new(Filter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FixedIPs) DeepCopyInto(out *FixedIPs) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FixedIPs. +func (in *FixedIPs) DeepCopy() *FixedIPs { + if in == nil { + return nil + } + out := new(FixedIPs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkParam) DeepCopyInto(out *NetworkParam) { + *out = *in + in.Filter.DeepCopyInto(&out.Filter) + if in.Subnets != nil { + in, out := &in.Subnets, &out.Subnets + *out = make([]SubnetParam, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PortTags != nil { + in, out := &in.PortTags, &out.PortTags + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Profile != nil { + in, out := &in.Profile, &out.Profile + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PortSecurity != nil { + in, out := &in.PortSecurity, &out.PortSecurity + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkParam. +func (in *NetworkParam) DeepCopy() *NetworkParam { + if in == nil { + return nil + } + out := new(NetworkParam) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OpenstackProviderSpec) DeepCopyInto(out *OpenstackProviderSpec) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.CloudsSecret != nil { + in, out := &in.CloudsSecret, &out.CloudsSecret + *out = new(v1.SecretReference) + **out = **in + } + if in.Networks != nil { + in, out := &in.Networks, &out.Networks + *out = make([]NetworkParam, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]PortOpts, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]SecurityGroupParam, len(*in)) + copy(*out, *in) + } + if in.UserDataSecret != nil { + in, out := &in.UserDataSecret, &out.UserDataSecret + *out = new(v1.SecretReference) + **out = **in + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ServerMetadata != nil { + in, out := &in.ServerMetadata, &out.ServerMetadata + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ConfigDrive != nil { + in, out := &in.ConfigDrive, &out.ConfigDrive + *out = new(bool) + **out = **in + } + if in.RootVolume != nil { + in, out := &in.RootVolume, &out.RootVolume + *out = new(RootVolume) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenstackProviderSpec. +func (in *OpenstackProviderSpec) DeepCopy() *OpenstackProviderSpec { + if in == nil { + return nil + } + out := new(OpenstackProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenstackProviderSpec) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortOpts) DeepCopyInto(out *PortOpts) { + *out = *in + if in.AdminStateUp != nil { + in, out := &in.AdminStateUp, &out.AdminStateUp + *out = new(bool) + **out = **in + } + if in.FixedIPs != nil { + in, out := &in.FixedIPs, &out.FixedIPs + *out = make([]FixedIPs, len(*in)) + copy(*out, *in) + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = new([]string) + if **in != nil { + in, out := *in, *out + *out = make([]string, len(*in)) + copy(*out, *in) + } + } + if in.AllowedAddressPairs != nil { + in, out := &in.AllowedAddressPairs, &out.AllowedAddressPairs + *out = make([]AddressPair, len(*in)) + copy(*out, *in) + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Profile != nil { + in, out := &in.Profile, &out.Profile + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PortSecurity != nil { + in, out := &in.PortSecurity, &out.PortSecurity + *out = new(bool) + **out = **in + } + if in.Trunk != nil { + in, out := &in.Trunk, &out.Trunk + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortOpts. 
+func (in *PortOpts) DeepCopy() *PortOpts { + if in == nil { + return nil + } + out := new(PortOpts) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RootVolume) DeepCopyInto(out *RootVolume) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootVolume. +func (in *RootVolume) DeepCopy() *RootVolume { + if in == nil { + return nil + } + out := new(RootVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityGroupFilter) DeepCopyInto(out *SecurityGroupFilter) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupFilter. +func (in *SecurityGroupFilter) DeepCopy() *SecurityGroupFilter { + if in == nil { + return nil + } + out := new(SecurityGroupFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityGroupParam) DeepCopyInto(out *SecurityGroupParam) { + *out = *in + out.Filter = in.Filter + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroupParam. +func (in *SecurityGroupParam) DeepCopy() *SecurityGroupParam { + if in == nil { + return nil + } + out := new(SecurityGroupParam) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubnetFilter) DeepCopyInto(out *SubnetFilter) { + *out = *in + if in.DeprecatedEnableDHCP != nil { + in, out := &in.DeprecatedEnableDHCP, &out.DeprecatedEnableDHCP + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetFilter. +func (in *SubnetFilter) DeepCopy() *SubnetFilter { + if in == nil { + return nil + } + out := new(SubnetFilter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubnetParam) DeepCopyInto(out *SubnetParam) { + *out = *in + in.Filter.DeepCopyInto(&out.Filter) + if in.PortTags != nil { + in, out := &in.PortTags, &out.PortTags + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PortSecurity != nil { + in, out := &in.PortSecurity, &out.PortSecurity + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubnetParam. +func (in *SubnetParam) DeepCopy() *SubnetParam { + if in == nil { + return nil + } + out := new(SubnetParam) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..3ea9595d2 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,196 @@ +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. 
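The generated deepcopy functions above exist because several fields are pointers, slices, or maps; a plain struct assignment would alias them between copies. A minimal sketch of the difference, using only types defined in this diff (the UUID is hypothetical):

```go
package main

import (
	"fmt"

	machinev1alpha1 "github.com/openshift/api/machine/v1alpha1"
)

func main() {
	portSecurity := true
	original := machinev1alpha1.NetworkParam{
		UUID:         "6c1b7a8e-0000-0000-0000-000000000000", // hypothetical
		PortSecurity: &portSecurity,
	}

	// DeepCopy allocates a fresh bool behind PortSecurity, so mutating the
	// clone cannot leak back into the original; a plain struct assignment
	// would have shared the pointer.
	clone := original.DeepCopy()
	*clone.PortSecurity = false

	fmt.Println(*original.PortSecurity, *clone.PortSecurity) // prints: true false
}
```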
diff --git a/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..3ea9595d2 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1alpha1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,196 @@ +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Filter = map[string]string{ + "id": "Deprecated: use NetworkParam.uuid instead. Ignored if NetworkParam.uuid is set.", + "name": "name filters networks by name.", + "description": "description filters networks by description.", + "tenantId": "tenantId filters networks by tenant ID. Deprecated: use projectId instead. tenantId will be ignored if projectId is set.", + "projectId": "projectId filters networks by project ID.", + "tags": "tags filters by networks containing all specified tags. Multiple tags are comma separated.", + "tagsAny": "tagsAny filters by networks containing any specified tags. Multiple tags are comma separated.", + "notTags": "notTags filters by networks which don't match all specified tags. NOT (t1 AND t2...) Multiple tags are comma separated.", + "notTagsAny": "notTagsAny filters by networks which don't match any specified tags. NOT (t1 OR t2...) Multiple tags are comma separated.", + "status": "Deprecated: status is silently ignored. It has no replacement.", + "adminStateUp": "Deprecated: adminStateUp is silently ignored. It has no replacement.", + "shared": "Deprecated: shared is silently ignored. It has no replacement.", + "marker": "Deprecated: marker is silently ignored. It has no replacement.", + "limit": "Deprecated: limit is silently ignored. It has no replacement.", + "sortKey": "Deprecated: sortKey is silently ignored. It has no replacement.", + "sortDir": "Deprecated: sortDir is silently ignored. It has no replacement.", +} + +func (Filter) SwaggerDoc() map[string]string { + return map_Filter +} + +var map_FixedIPs = map[string]string{ + "subnetID": "subnetID specifies the ID of the subnet where the fixed IP will be allocated.", + "ipAddress": "ipAddress is a specific IP address to use in the given subnet. Port creation will fail if the address is not available. If not specified, an available IP from the given subnet will be selected automatically.", +} + +func (FixedIPs) SwaggerDoc() map[string]string { + return map_FixedIPs +} + +var map_NetworkParam = map[string]string{ + "uuid": "The UUID of the network.
Required if you omit the port attribute.", + "fixedIp": "A fixed IPv4 address for the NIC.", + "filter": "Filters for optional network query", + "subnets": "Subnets within a network to use", + "noAllowedAddressPairs": "NoAllowedAddressPairs disables creation of allowed address pairs for the network ports", + "portTags": "PortTags allows users to specify a list of tags to add to ports created in a given network", + "vnicType": "The virtual network interface card (vNIC) type that is bound to the neutron port.", + "profile": "A dictionary that enables the application running on the specified host to pass and receive virtual network interface (VIF) port-specific information to the plug-in.", + "portSecurity": "PortSecurity optionally enables or disables security on ports managed by OpenStack", +} + +func (NetworkParam) SwaggerDoc() map[string]string { + return map_NetworkParam +} + +var map_OpenstackProviderSpec = map[string]string{ + "": "OpenstackProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field for an OpenStack Instance. It is used by the Openstack machine actuator to create a single machine instance. Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "cloudsSecret": "The name of the secret containing the openstack credentials", + "cloudName": "The name of the cloud to use from the clouds secret", + "flavor": "The flavor reference for the flavor for your server instance.", + "image": "The name of the image to use for your server instance. If the RootVolume is specified, this will be ignored and use rootVolume directly.", + "keyName": "The ssh key to inject in the instance", + "sshUserName": "The machine ssh username", + "networks": "A networks object. Required parameter when there are multiple networks defined for the tenant. When you do not specify the networks parameter, the server attaches to the only network created for the current tenant.", + "ports": "Create and assign additional ports to instances", + "floatingIP": "floatingIP specifies a floating IP to be associated with the machine. Note that it is not safe to use this parameter in a MachineSet, as only one Machine may be assigned the same floating IP.\n\nDeprecated: floatingIP will be removed in a future release as it cannot be implemented correctly.", + "availabilityZone": "The availability zone from which to launch the server.", + "securityGroups": "The names of the security groups to assign to the instance", + "userDataSecret": "The name of the secret containing the user data (startup script in most cases)", + "trunk": "Whether the server instance is created on a trunk port or not.", + "tags": "Machine tags Requires Nova api 2.52 minimum!", + "serverMetadata": "Metadata mapping. Allows you to create a map of key value pairs to add to the server instance.", + "configDrive": "Config Drive support", + "rootVolume": "The volume metadata to boot from", + "serverGroupID": "The server group to assign the machine to.", + "serverGroupName": "The server group to assign the machine to. A server group with that name will be created if it does not exist.
If both ServerGroupID and ServerGroupName are non-empty, they must refer to the same OpenStack resource.", + "primarySubnet": "The subnet that a set of machines will get ingress/egress traffic from", +} + +func (OpenstackProviderSpec) SwaggerDoc() map[string]string { + return map_OpenstackProviderSpec +} + +var map_PortOpts = map[string]string{ + "networkID": "networkID is the ID of the network the port will be created in. It is required.", + "nameSuffix": "If nameSuffix is specified the created port will be named -. If not specified the port will be named -.", + "description": "description specifies the description of the created port.", + "adminStateUp": "adminStateUp sets the administrative state of the created port to up (true), or down (false).", + "macAddress": "macAddress specifies the MAC address of the created port.", + "fixedIPs": "fixedIPs specifies a set of fixed IPs to assign to the port. They must all be valid for the port's network.", + "tenantID": "tenantID specifies the tenant ID of the created port. Note that this requires OpenShift to have administrative permissions, which is typically not the case. Use of this field is not recommended. Deprecated: use projectID instead. It will be ignored if projectID is set.", + "projectID": "projectID specifies the project ID of the created port. Note that this requires OpenShift to have administrative permissions, which is typically not the case. Use of this field is not recommended.", + "securityGroups": "securityGroups specifies a set of security group UUIDs to use instead of the machine's default security groups. The default security groups will be used if this is left empty or not specified.", + "allowedAddressPairs": "allowedAddressPairs specifies a set of allowed address pairs to add to the port.", + "tags": "tags specifies a set of tags to add to the port.", + "vnicType": "The virtual network interface card (vNIC) type that is bound to the neutron port.", + "profile": "A dictionary that enables the application running on the specified host to pass and receive virtual network interface (VIF) port-specific information to the plug-in.", + "portSecurity": "enable or disable security on a given port. Incompatible with securityGroups and allowedAddressPairs", + "trunk": "Enables and disables trunk at port level. If not provided, openStackMachine.Spec.Trunk is inherited.", + "hostID": "The ID of the host where the port is allocated. Do not use this field: it cannot be used correctly. Deprecated: hostID is silently ignored. It will be removed with no replacement.", +} + +func (PortOpts) SwaggerDoc() map[string]string { + return map_PortOpts +} + +var map_RootVolume = map[string]string{ + "sourceUUID": "sourceUUID specifies the UUID of a glance image used to populate the root volume. Deprecated: set image in the platform spec instead. This will be ignored if image is set in the platform spec.", + "volumeType": "volumeType specifies a volume type to use when creating the root volume. If not specified the default volume type will be used.", + "diskSize": "diskSize specifies the size, in GB, of the created root volume.", + "availabilityZone": "availabilityZone specifies the Cinder availability zone where the root volume will be created.", + "sourceType": "Deprecated: sourceType will be silently ignored. There is no replacement.", + "deviceType": "Deprecated: deviceType will be silently ignored.
There is no replacement.", +} + +func (RootVolume) SwaggerDoc() map[string]string { + return map_RootVolume +} + +var map_SecurityGroupFilter = map[string]string{ + "id": "id specifies the ID of a security group to use. If set, id will not be validated before use. An invalid id will result in failure to create a server with an appropriate error message.", + "name": "name filters security groups by name.", + "description": "description filters security groups by description.", + "tenantId": "tenantId filters security groups by tenant ID. Deprecated: use projectId instead. tenantId will be ignored if projectId is set.", + "projectId": "projectId filters security groups by project ID.", + "tags": "tags filters by security groups containing all specified tags. Multiple tags are comma separated.", + "tagsAny": "tagsAny filters by security groups containing any specified tags. Multiple tags are comma separated.", + "notTags": "notTags filters by security groups which don't match all specified tags. NOT (t1 AND t2...) Multiple tags are comma separated.", + "notTagsAny": "notTagsAny filters by security groups which don't match any specified tags. NOT (t1 OR t2...) Multiple tags are comma separated.", + "limit": "Deprecated: limit is silently ignored. It has no replacement.", + "marker": "Deprecated: marker is silently ignored. It has no replacement.", + "sortKey": "Deprecated: sortKey is silently ignored. It has no replacement.", + "sortDir": "Deprecated: sortDir is silently ignored. It has no replacement.", +} + +func (SecurityGroupFilter) SwaggerDoc() map[string]string { + return map_SecurityGroupFilter +} + +var map_SecurityGroupParam = map[string]string{ + "uuid": "Security Group UUID", + "name": "Security Group name", + "filter": "Filters used to query security groups in OpenStack", +} + +func (SecurityGroupParam) SwaggerDoc() map[string]string { + return map_SecurityGroupParam +} + +var map_SubnetFilter = map[string]string{ + "id": "id is the uuid of a specific subnet to use. If specified, id will not be validated before use; an invalid id will cause server creation to fail with an appropriate error.", + "name": "name filters subnets by name.", + "description": "description filters subnets by description.", + "networkId": "Deprecated: networkId is silently ignored. Set uuid on the containing network definition instead.", + "tenantId": "tenantId filters subnets by tenant ID. Deprecated: use projectId instead. tenantId will be ignored if projectId is set.", + "projectId": "projectId filters subnets by project ID.", + "ipVersion": "ipVersion filters subnets by IP version.", + "gateway_ip": "gateway_ip filters subnets by gateway IP.", + "cidr": "cidr filters subnets by CIDR.", + "ipv6AddressMode": "ipv6AddressMode filters subnets by IPv6 address mode.", + "ipv6RaMode": "ipv6RaMode filters subnets by IPv6 router advertisement mode.", + "subnetpoolId": "subnetpoolId filters subnets by subnet pool ID.", + "tags": "tags filters by subnets containing all specified tags. Multiple tags are comma separated.", + "tagsAny": "tagsAny filters by subnets containing any specified tags. Multiple tags are comma separated.", + "notTags": "notTags filters by subnets which don't match all specified tags. NOT (t1 AND t2...) Multiple tags are comma separated.", + "notTagsAny": "notTagsAny filters by subnets which don't match any specified tags. NOT (t1 OR t2...) Multiple tags are comma separated.", + "enableDhcp": "Deprecated: enableDhcp is silently ignored. It has no replacement.", + "limit": "Deprecated: limit is silently ignored.
It has no replacement.", + "marker": "Deprecated: marker is silently ignored. It has no replacement.", + "sortKey": "Deprecated: sortKey is silently ignored. It has no replacement.", + "sortDir": "Deprecated: sortDir is silently ignored. It has no replacement.", +} + +func (SubnetFilter) SwaggerDoc() map[string]string { + return map_SubnetFilter +} + +var map_SubnetParam = map[string]string{ + "uuid": "The UUID of the network. Required if you omit the port attribute.", + "filter": "Filters for optional network query", + "portTags": "PortTags are tags that are added to ports created on this subnet", + "portSecurity": "PortSecurity optionally enables or disables security on ports managed by OpenStack", +} + +func (SubnetParam) SwaggerDoc() map[string]string { + return map_SubnetParam +} + +// AUTO-GENERATED FUNCTIONS END HERE
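The generated SwaggerDoc methods above are plain lookups from JSON field name to doc string; go-restful calls them when emitting API documentation. A minimal sketch of using one directly, assuming only the types and methods defined in this file:

```go
package main

import (
	"fmt"

	machinev1alpha1 "github.com/openshift/api/machine/v1alpha1"
)

func main() {
	// SwaggerDoc maps JSON field names to the generated doc strings.
	docs := machinev1alpha1.PortOpts{}.SwaggerDoc()
	fmt.Println(docs["networkID"])
	fmt.Println(docs["trunk"])
}
```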
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/0000_10_machine.crd.yaml b/vendor/github.com/openshift/api/machine/v1beta1/0000_10_machine.crd.yaml new file mode 100644 index 000000000..6de9c06b2 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/0000_10_machine.crd.yaml @@ -0,0 +1,329 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + capability.openshift.io/name: MachineAPI + api-approved.openshift.io: https://github.com/openshift/api/pull/948 + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: machines.machine.openshift.io +spec: + group: machine.openshift.io + names: + kind: Machine + listKind: MachineList + plural: machines + singular: machine + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Phase of machine + jsonPath: .status.phase + name: Phase + type: string + - description: Type of instance + jsonPath: .metadata.labels['machine\.openshift\.io/instance-type'] + name: Type + type: string + - description: Region associated with machine + jsonPath: .metadata.labels['machine\.openshift\.io/region'] + name: Region + type: string + - description: Zone associated with machine + jsonPath: .metadata.labels['machine\.openshift\.io/zone'] + name: Zone + type: string + - description: Machine age + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Node associated with machine + jsonPath: .status.nodeRef.name + name: Node + priority: 1 + type: string + - description: Provider ID of machine created in cloud provider + jsonPath: .spec.providerID + name: ProviderID + priority: 1 + type: string + - description: State of instance + jsonPath: .metadata.annotations['machine\.openshift\.io/instance-state'] + name: State + priority: 1 + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: 'Machine is the Schema for the machines API Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).' + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: MachineSpec defines the desired state of Machine + type: object + properties: + lifecycleHooks: + description: LifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle. + type: object + properties: + preDrain: + description: PreDrain hooks prevent the machine from being drained. This also blocks further lifecycle events, such as termination. + type: array + items: + description: LifecycleHook represents a single instance of a lifecycle hook + type: object + required: + - name + - owner + properties: + name: + description: Name defines a unique name for the lifecycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. + type: string + maxLength: 256 + minLength: 3 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + owner: + description: Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. + type: string + maxLength: 512 + minLength: 3 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + preTerminate: + description: PreTerminate hooks prevent the machine from being terminated. PreTerminate hooks are actioned after the Machine has been drained. + type: array + items: + description: LifecycleHook represents a single instance of a lifecycle hook + type: object + required: + - name + - owner + properties: + name: + description: Name defines a unique name for the lifecycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. + type: string + maxLength: 256 + minLength: 3 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + owner: + description: Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. + type: string + maxLength: 512 + minLength: 3 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + metadata: + description: ObjectMeta will autopopulate the Node created. Use this to indicate what labels, annotations, name prefix, etc., should be used when creating the Node. + type: object + properties: + annotations: + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects.
More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + additionalProperties: + type: string + generateName: + description: "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. \n If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). \n Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + type: string + labels: + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + additionalProperties: + type: string + name: + description: 'Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. + type: array + items: + description: OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. + type: object + required: + - apiVersion + - kind + - name + - uid + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.
+ type: boolean + controller: + description: If true, this reference points to the managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids' + type: string + x-kubernetes-map-type: atomic + providerID: + description: ProviderID is the identification ID of the machine provided by the provider. This field must match the provider ID as seen on the node object corresponding to this machine. This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver and then a comparison is done to find out unregistered machines and are marked for delete. This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider. + type: string + providerSpec: + description: ProviderSpec details Provider-specific configuration to use during node creation. + type: object + properties: + value: + description: Value is an inlined, serialized representation of the resource configuration. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field, akin to component config. + type: object + x-kubernetes-preserve-unknown-fields: true + taints: + description: The list of the taints to be applied to the corresponding Node in additive manner. This list will not overwrite any other taints added to the Node on an ongoing basis by other entities. These taints should be actively reconciled (e.g. if you ask the machine controller to apply a taint and then manually remove the taint the machine controller will put it back) but not have the machine controller remove any taints + type: array + items: + description: The node this Taint is attached to has the "effect" on any pod that does not tolerate the Taint. + type: object + required: + - effect + - key + properties: + effect: + description: Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Required. The taint key to be applied to a node. + type: string + timeAdded: + description: TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints. + type: string + format: date-time + value: + description: The taint value corresponding to the taint key. + type: string + status: + description: MachineStatus defines the observed state of Machine + type: object + properties: + addresses: + description: Addresses is a list of addresses assigned to the machine. Queried from cloud provider, if available. + type: array + items: + description: NodeAddress contains information for the node's address.
+ type: object + required: + - address + - type + properties: + address: + description: The node address. + type: string + type: + description: Node address type, one of Hostname, ExternalIP or InternalIP. + type: string + conditions: + description: Conditions defines the current state of the Machine + type: array + items: + description: Condition defines an observation of a Machine API resource operational state. + type: object + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: A human readable message indicating details about the transition. This field may be empty. + type: string + reason: + description: The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty. + type: string + severity: + description: Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. + type: string + errorMessage: + description: "ErrorMessage will be set in the event that there is a terminal problem reconciling the Machine and will contain a more verbose string suitable for logging and human consumption. \n This field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured. \n Any transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output." + type: string + errorReason: + description: "ErrorReason will be set in the event that there is a terminal problem reconciling the Machine and will contain a succinct value suitable for machine interpretation. \n This field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured. \n Any transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output." 
+ type: string + lastOperation: + description: LastOperation describes the last-operation performed by the machine-controller. This API should be useful as a history in terms of the latest operation performed on the specific machine. It should also convey the state of the latest-operation for example if it is still on-going, failed or completed successfully. + type: object + properties: + description: + description: Description is the human-readable description of the last operation. + type: string + lastUpdated: + description: LastUpdated is the timestamp at which LastOperation API was last-updated. + type: string + format: date-time + state: + description: State is the current status of the last performed operation. E.g. Processing, Failed, Successful etc + type: string + type: + description: Type is the type of operation which was last performed. E.g. Create, Delete, Update etc + type: string + lastUpdated: + description: LastUpdated identifies when this status was last observed. + type: string + format: date-time + nodeRef: + description: NodeRef will point to the corresponding Node if it exists. + type: object + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + x-kubernetes-map-type: atomic + phase: + description: 'Phase represents the current phase of machine actuation. One of: Failed, Provisioning, Provisioned, Running, Deleting' + type: string + providerStatus: + description: ProviderStatus details a Provider-specific status. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field. 
+ type: object + x-kubernetes-preserve-unknown-fields: true + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: []
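The Machine CRD above admits the lifecycleHooks documented in its spec. The sketch below constructs a Machine with a preDrain hook in Go; it assumes the v1beta1 Go bindings that accompany this CRD expose Machine, MachineSpec, LifecycleHooks, and LifecycleHook with these field names, and the hook name and owner shown are hypothetical.

```go
package main

import (
	"fmt"

	machinev1beta1 "github.com/openshift/api/machine/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	machine := machinev1beta1.Machine{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "worker-0", // hypothetical machine name
			Namespace: "openshift-machine-api",
		},
		Spec: machinev1beta1.MachineSpec{
			LifecycleHooks: machinev1beta1.LifecycleHooks{
				// A preDrain hook pauses draining (and later lifecycle
				// steps) until the named owner removes the hook.
				PreDrain: []machinev1beta1.LifecycleHook{{
					Name:  "MigrateImportantApp",            // must match the CamelCase pattern above
					Owner: "clusteroperator/example-backup", // 3-512 characters
				}},
			},
		},
	}
	fmt.Println(machine.Name, machine.Spec.LifecycleHooks.PreDrain[0].Name)
}
```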
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/0000_10_machinehealthcheck.yaml b/vendor/github.com/openshift/api/machine/v1beta1/0000_10_machinehealthcheck.yaml new file mode 100644 index 000000000..614b7a724 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/0000_10_machinehealthcheck.yaml @@ -0,0 +1,195 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + capability.openshift.io/name: MachineAPI + exclude.release.openshift.io/internal-openshift-hosted: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + api-approved.openshift.io: https://github.com/openshift/api/pull/1032 + creationTimestamp: null + name: machinehealthchecks.machine.openshift.io +spec: + group: machine.openshift.io + names: + kind: MachineHealthCheck + listKind: MachineHealthCheckList + plural: machinehealthchecks + shortNames: + - mhc + - mhcs + singular: machinehealthcheck + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Maximum number of unhealthy machines allowed + jsonPath: .spec.maxUnhealthy + name: MaxUnhealthy + type: string + - description: Number of machines currently monitored + jsonPath: .status.expectedMachines + name: ExpectedMachines + type: integer + - description: Current observed healthy machines + jsonPath: .status.currentHealthy + name: CurrentHealthy + type: integer + name: v1beta1 + schema: + openAPIV3Schema: + description: 'MachineHealthCheck is the Schema for the machinehealthchecks API Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).' + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Specification of machine health check policy + type: object + properties: + maxUnhealthy: + description: Any further remediation is only allowed if at most "MaxUnhealthy" machines selected by "selector" are not healthy. Expects either a positive integer value or a percentage value. Percentage values must be positive whole numbers and are capped at 100%. Both 0 and 0% are valid and will block all remediation. + default: 100% + pattern: ^((100|[0-9]{1,2})%|[0-9]+)$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + nodeStartupTimeout: + description: Machines older than this duration without a node will be considered to have failed and will be remediated. To prevent Machines without Nodes from being removed, disable startup checks by setting this value explicitly to "0". Expects an unsigned duration string of decimal numbers each with optional fraction and a unit suffix, eg "300ms", "1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + type: string + default: 10m + pattern: ^0|([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + remediationTemplate: + description: "RemediationTemplate is a reference to a remediation template provided by an infrastructure provider. \n This field is completely optional, when filled, the MachineHealthCheck controller creates a new object from the template referenced and hands off remediation of the machine to a controller that lives outside of Machine API Operator." + type: object + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + x-kubernetes-map-type: atomic + selector: + description: 'Label selector to match machines whose health will be exercised. Note: An empty selector will match all machines.' + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs.
A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + unhealthyConditions: + description: UnhealthyConditions contains a list of the conditions that determine whether a node is considered unhealthy. The conditions are combined in a logical OR, i.e. if any of the conditions is met, the node is unhealthy. + type: array + minItems: 1 + items: + description: UnhealthyCondition represents a Node condition type and value with a timeout specified as a duration. When the named condition has been in the given status for at least the timeout value, a node is considered unhealthy. + type: object + properties: + status: + type: string + minLength: 1 + timeout: + description: Expects an unsigned duration string of decimal numbers each with optional fraction and a unit suffix, eg "300ms", "1.5h" or "2h45m". Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + type: string + pattern: ^([0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$ + type: + type: string + minLength: 1 + status: + description: Most recently observed status of MachineHealthCheck resource + type: object + properties: + conditions: + description: Conditions defines the current state of the MachineHealthCheck + type: array + items: + description: Condition defines an observation of a Machine API resource operational state. + type: object + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: A human readable message indicating details about the transition. This field may be empty. + type: string + reason: + description: The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty. + type: string + severity: + description: Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition in CamelCase or in foo.example.com/CamelCase. Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. 
+ type: string + currentHealthy: + description: total number of healthy machines counted by this machine health check + type: integer + minimum: 0 + expectedMachines: + description: total number of machines counted by this machine health check + type: integer + minimum: 0 + remediationsAllowed: + description: RemediationsAllowed is the number of further remediations allowed by this machine health check before maxUnhealthy short-circuiting will be applied + type: integer + format: int32 + minimum: 0 + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: []
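The MachineHealthCheck schema above is easier to digest with a concrete object. Below is a minimal sketch in Go; since the corresponding Go types file is not part of this hunk, the MachineHealthCheckSpec and UnhealthyCondition field names and types (including MaxUnhealthy as a *intstr.IntOrString) are assumed to mirror the schema, not taken from this patch.

package example

import (
	"time"

	machinev1beta1 "github.com/openshift/api/machine/v1beta1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// workerMHC sketches a health check that remediates worker machines whose
// Node has reported Ready=Unknown for five minutes, as long as no more than
// 40% of the selected machines are unhealthy.
func workerMHC() *machinev1beta1.MachineHealthCheck {
	maxUnhealthy := intstr.FromString("40%") // 0 or 0% would block all remediation
	return &machinev1beta1.MachineHealthCheck{
		ObjectMeta: metav1.ObjectMeta{Name: "worker-mhc", Namespace: "openshift-machine-api"},
		Spec: machinev1beta1.MachineHealthCheckSpec{
			// An empty selector would match all machines, so scope it to a role.
			Selector: metav1.LabelSelector{
				MatchLabels: map[string]string{"machine.openshift.io/cluster-api-machine-role": "worker"},
			},
			MaxUnhealthy: &maxUnhealthy,
			// Unhealthy conditions are ORed: any single match marks the node unhealthy.
			UnhealthyConditions: []machinev1beta1.UnhealthyCondition{{
				Type:    corev1.NodeReady,
				Status:  corev1.ConditionUnknown,
				Timeout: metav1.Duration{Duration: 5 * time.Minute},
			}},
		},
	}
}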
Valid values are "Random, "Newest", "Oldest" + type: string + enum: + - Random + - Newest + - Oldest + minReadySeconds: + description: MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready. Defaults to 0 (machine will be considered available as soon as it is ready) + type: integer + format: int32 + replicas: + description: Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. + type: integer + format: int32 + default: 1 + selector: + description: 'Selector is a label query over machines that should match the replica count. Label keys and values that must match in order to be controlled by this MachineSet. It must match the machine template''s labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors' + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + template: + description: Template is the object that describes the machine that will be created if insufficient replicas are detected. + type: object + properties: + metadata: + description: 'Standard object''s metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata' + type: object + properties: + annotations: + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + additionalProperties: + type: string + generateName: + description: "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. 
\n If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). \n Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + type: string + labels: + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + additionalProperties: + type: string + name: + description: 'Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. + type: array + items: + description: OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. + type: object + required: + - apiVersion + - kind + - name + - uid + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. + type: boolean + controller: + description: If true, this reference points to the managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names' + type: string + uid: + description: 'UID of the referent. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids' + type: string + x-kubernetes-map-type: atomic + spec: + description: 'Specification of the desired behavior of the machine. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status' + type: object + properties: + lifecycleHooks: + description: LifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle. + type: object + properties: + preDrain: + description: PreDrain hooks prevent the machine from being drained. This also blocks further lifecycle events, such as termination. + type: array + items: + description: LifecycleHook represents a single instance of a lifecycle hook + type: object + required: + - name + - owner + properties: + name: + description: Name defines a unique name for the lifecycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. + type: string + maxLength: 256 + minLength: 3 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + owner: + description: Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. + type: string + maxLength: 512 + minLength: 3 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + preTerminate: + description: PreTerminate hooks prevent the machine from being terminated. PreTerminate hooks are actioned after the Machine has been drained. + type: array + items: + description: LifecycleHook represents a single instance of a lifecycle hook + type: object + required: + - name + - owner + properties: + name: + description: Name defines a unique name for the lifecycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, eg. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity. + type: string + maxLength: 256 + minLength: 3 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + owner: + description: Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook. + type: string + maxLength: 512 + minLength: 3 + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + metadata: + description: ObjectMeta will autopopulate the Node created. Use this to indicate what labels, annotations, name prefix, etc., should be used when creating the Node. + type: object + properties: + annotations: + description: 'Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects.
More info: http://kubernetes.io/docs/user-guide/annotations' + type: object + additionalProperties: + type: string + generateName: + description: "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server. \n If this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header). \n Applied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency" + type: string + labels: + description: 'Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels' + type: object + additionalProperties: + type: string + name: + description: 'Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names' + type: string + namespace: + description: "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty. \n Must be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces" + type: string + ownerReferences: + description: List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller. + type: array + items: + description: OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field. + type: object + required: + - apiVersion + - kind + - name + - uid + properties: + apiVersion: + description: API version of the referent. + type: string + blockOwnerDeletion: + description: If true, AND if the owner has the "foregroundDeletion" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs "delete" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned. 
+ type: boolean + controller: + description: If true, this reference points to the managing controller. + type: boolean + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids' + type: string + x-kubernetes-map-type: atomic + providerID: + description: ProviderID is the identification ID of the machine provided by the provider. This field must match the provider ID as seen on the node object corresponding to this machine. This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver and then a comparison is done to find out unregistered machines and are marked for delete. This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider. + type: string + providerSpec: + description: ProviderSpec details Provider-specific configuration to use during node creation. + type: object + properties: + value: + description: Value is an inlined, serialized representation of the resource configuration. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field, akin to component config. + type: object + x-kubernetes-preserve-unknown-fields: true + taints: + description: The list of the taints to be applied to the corresponding Node in additive manner. This list will not overwrite any other taints added to the Node on an ongoing basis by other entities. These taints should be actively reconciled e.g. if you ask the machine controller to apply a taint and then manually remove the taint the machine controller will put it back) but not have the machine controller remove any taints + type: array + items: + description: The node this Taint is attached to has the "effect" on any pod that does not tolerate the Taint. + type: object + required: + - effect + - key + properties: + effect: + description: Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Required. The taint key to be applied to a node. + type: string + timeAdded: + description: TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints. + type: string + format: date-time + value: + description: The taint value corresponding to the taint key. + type: string + status: + description: MachineSetStatus defines the observed state of MachineSet + type: object + properties: + availableReplicas: + description: The number of available replicas (ready for at least minReadySeconds) for this MachineSet. 
+ type: integer + format: int32 + errorMessage: + type: string + errorReason: + description: "In the event that there is a terminal problem reconciling the replicas, both ErrorReason and ErrorMessage will be set. ErrorReason will be populated with a succinct value suitable for machine interpretation, while ErrorMessage will contain a more verbose string suitable for logging and human consumption. \n These fields should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the MachineTemplate's spec or the configuration of the machine controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the machine controller, or the responsible machine controller itself being critically misconfigured. \n Any transient errors that occur during the reconciliation of Machines can be added as events to the MachineSet object and/or logged in the controller's output." + type: string + fullyLabeledReplicas: + description: The number of replicas that have labels matching the labels of the machine template of the MachineSet. + type: integer + format: int32 + observedGeneration: + description: ObservedGeneration reflects the generation of the most recently observed MachineSet. + type: integer + format: int64 + readyReplicas: + description: The number of ready replicas for this MachineSet. A machine is considered ready when the node has been created and is "Ready". + type: integer + format: int32 + replicas: + description: Replicas is the most recently observed number of replicas. + type: integer + format: int32 + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.labelSelector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: []
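To make the MachineSet schema above concrete, here is a minimal sketch in Go. The types_machineset.go file is not part of this hunk, so MachineSetSpec, MachineTemplateSpec and the embedded ObjectMeta are assumed to mirror the schema; the labels and names are placeholders.

package example

import (
	machinev1beta1 "github.com/openshift/api/machine/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// threeWorkers sketches a MachineSet that keeps three worker machines running.
// The selector must match the template labels, as the schema requires.
func threeWorkers() *machinev1beta1.MachineSet {
	replicas := int32(3) // a pointer distinguishes an explicit zero from unset
	labels := map[string]string{"machine.openshift.io/cluster-api-machineset": "worker-a"}
	return &machinev1beta1.MachineSet{
		ObjectMeta: metav1.ObjectMeta{Name: "worker-a", Namespace: "openshift-machine-api"},
		Spec: machinev1beta1.MachineSetSpec{
			Replicas:     &replicas,
			DeletePolicy: "Oldest", // one of Random (the default), Newest, Oldest
			Selector:     metav1.LabelSelector{MatchLabels: labels},
			Template: machinev1beta1.MachineTemplateSpec{
				ObjectMeta: machinev1beta1.ObjectMeta{Labels: labels},
			},
		},
	}
}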
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/Makefile b/vendor/github.com/openshift/api/machine/v1beta1/Makefile new file mode 100644 index 000000000..fee9e68fc --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="machine.openshift.io/v1beta1" diff --git a/vendor/github.com/openshift/api/machine/v1beta1/doc.go b/vendor/github.com/openshift/api/machine/v1beta1/doc.go new file mode 100644 index 000000000..e14fc64e3 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/doc.go @@ -0,0 +1,7 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +kubebuilder:validation:Optional +// +groupName=machine.openshift.io +package v1beta1 diff --git a/vendor/github.com/openshift/api/machine/v1beta1/register.go b/vendor/github.com/openshift/api/machine/v1beta1/register.go new file mode 100644 index 000000000..a3678c007 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/register.go @@ -0,0 +1,44 @@ +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "machine.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + metav1.AddToGroupVersion(scheme, GroupVersion) + + scheme.AddKnownTypes(GroupVersion, + &Machine{}, + &MachineList{}, + &MachineSet{}, + &MachineSetList{}, + &MachineHealthCheck{}, + &MachineHealthCheckList{}, + ) + + return nil +} diff --git a/vendor/github.com/openshift/api/machine/v1beta1/stable.machine.testsuite.yaml b/vendor/github.com/openshift/api/machine/v1beta1/stable.machine.testsuite.yaml new file mode 100644 index 000000000..2a7e0d62c --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/stable.machine.testsuite.yaml @@ -0,0 +1,14 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] Machine" +crd: 0000_10_machine.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal Machine + initial: | + apiVersion: machine.openshift.io/v1beta1 + kind: Machine + spec: {} # No spec is required for a Machine + expected: | + apiVersion: machine.openshift.io/v1beta1 + kind: Machine + spec: {} diff --git a/vendor/github.com/openshift/api/machine/v1beta1/stable.machinehealthcheck.testsuite.yaml b/vendor/github.com/openshift/api/machine/v1beta1/stable.machinehealthcheck.testsuite.yaml new file mode 100644 index 000000000..703bcdef1 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/stable.machinehealthcheck.testsuite.yaml @@ -0,0 +1,16 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] MachineHealthCheck" +crd: 0000_10_machinehealthcheck.yaml +tests: + onCreate: + - name: Should be able to create a minimal MachineHealthCheck + initial: | + apiVersion: machine.openshift.io/v1beta1 + kind: MachineHealthCheck + spec: {} # No spec is required for a MachineHealthCheck + expected: | + apiVersion: machine.openshift.io/v1beta1 + kind: MachineHealthCheck + spec: + maxUnhealthy: 100% + nodeStartupTimeout: 10m diff --git a/vendor/github.com/openshift/api/machine/v1beta1/stable.machineset.testsuite.yaml b/vendor/github.com/openshift/api/machine/v1beta1/stable.machineset.testsuite.yaml new file mode 100644 index 000000000..f4dbda11b --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/stable.machineset.testsuite.yaml @@ -0,0 +1,15 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] MachineSet" +crd: 0000_10_machineset.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal MachineSet + initial: | + apiVersion: machine.openshift.io/v1beta1 + kind: MachineSet + spec: {} # No spec is required for a MachineSet + expected: | + apiVersion: machine.openshift.io/v1beta1 + kind: MachineSet + spec: + replicas: 1
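register.go above is the entry point consumers actually touch: Install wires every type listed in addKnownTypes into a runtime.Scheme. A minimal sketch of that hand-off (the helper name is illustrative):

package example

import (
	machinev1beta1 "github.com/openshift/api/machine/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

// newMachineScheme returns a Scheme that recognizes Machine, MachineSet and
// MachineHealthCheck (and their List types) under machine.openshift.io/v1beta1.
func newMachineScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	if err := machinev1beta1.Install(scheme); err != nil {
		return nil, err
	}
	// scheme.Recognizes(machinev1beta1.GroupVersion.WithKind("Machine")) is now true.
	return scheme, nil
}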
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go new file mode 100644 index 000000000..f3853579b --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_awsprovider.go @@ -0,0 +1,311 @@ +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// AWSMachineProviderConfig is the Schema for the awsmachineproviderconfigs API +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type AWSMachineProviderConfig struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // AMI is the reference to the AMI from which to create the machine instance. + AMI AWSResourceReference `json:"ami"` + // InstanceType is the type of instance to create. Example: m4.xlarge + InstanceType string `json:"instanceType"` + // Tags is the set of tags to apply to an instance, in addition to the ones + // added by default by the actuator. These tags are additive. The actuator will ensure + // these tags are present, but will not remove any other tags that may exist on the + // instance. + // +optional + Tags []TagSpecification `json:"tags,omitempty"` + // IAMInstanceProfile is a reference to an IAM role to assign to the instance + // +optional + IAMInstanceProfile *AWSResourceReference `json:"iamInstanceProfile,omitempty"` + // UserDataSecret contains a local reference to a secret that contains the + // UserData to apply to the instance + // +optional + UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret,omitempty"` + // CredentialsSecret is a reference to the secret with AWS credentials. Otherwise, defaults to permissions + // provided by attached IAM role where the actuator is running. + // +optional + CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret,omitempty"` + // KeyName is the name of the KeyPair to use for SSH + // +optional + KeyName *string `json:"keyName,omitempty"` + // DeviceIndex is the index of the device on the instance for the network interface attachment. + // Defaults to 0. + DeviceIndex int64 `json:"deviceIndex"` + // PublicIP specifies whether the instance should get a public IP. If not present, + // it should use the default of its subnet. + // +optional + PublicIP *bool `json:"publicIp,omitempty"` + // NetworkInterfaceType specifies the type of network interface to be used for the primary + // network interface. + // Valid values are "ENA", "EFA", and omitted, which means no opinion and the platform + // chooses a good default which may change over time. + // The current default value is "ENA". + // Please visit https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html to learn more + // about the AWS Elastic Fabric Adapter interface option. + // +kubebuilder:validation:Enum:="ENA";"EFA" + // +optional + NetworkInterfaceType AWSNetworkInterfaceType `json:"networkInterfaceType,omitempty"` + // SecurityGroups is an array of references to security groups that should be applied to the + // instance. + // +optional + SecurityGroups []AWSResourceReference `json:"securityGroups,omitempty"` + // Subnet is a reference to the subnet to use for this instance + Subnet AWSResourceReference `json:"subnet"` + // Placement specifies where to create the instance in AWS + Placement Placement `json:"placement"` + // LoadBalancers is the set of load balancers to which the new instance + // should be added once it is created.
+ // +optional + LoadBalancers []LoadBalancerReference `json:"loadBalancers,omitempty"` + // BlockDevices is the set of block device mapping associated to this instance, + // a block device without a name will be used as the root device and only one device without a name is allowed + // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html + // +optional + BlockDevices []BlockDeviceMappingSpec `json:"blockDevices,omitempty"` + // SpotMarketOptions allows users to configure instances to be run using AWS Spot instances. + // +optional + SpotMarketOptions *SpotMarketOptions `json:"spotMarketOptions,omitempty"` + // MetadataServiceOptions allows users to configure instance metadata service interaction options. + // If nothing specified, default AWS IMDS settings will be applied. + // https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html + // +optional + MetadataServiceOptions MetadataServiceOptions `json:"metadataServiceOptions,omitempty"` + // PlacementGroupName specifies the name of the placement group in which to launch the instance. + // The placement group must already be created and may use any placement strategy. + // When omitted, no placement group is used when creating the EC2 instance. + // +optional + PlacementGroupName string `json:"placementGroupName,omitempty"` +} + +// BlockDeviceMappingSpec describes a block device mapping +type BlockDeviceMappingSpec struct { + // The device name exposed to the machine (for example, /dev/sdh or xvdh). + // +optional + DeviceName *string `json:"deviceName,omitempty"` + // Parameters used to automatically set up EBS volumes when the machine is + // launched. + // +optional + EBS *EBSBlockDeviceSpec `json:"ebs,omitempty"` + // Suppresses the specified device included in the block device mapping of the + // AMI. + // +optional + NoDevice *string `json:"noDevice,omitempty"` + // The virtual device name (ephemeralN). Machine store volumes are numbered + // starting from 0. A machine type with 2 available machine store volumes + // can specify mappings for ephemeral0 and ephemeral1. The number of available + // machine store volumes depends on the machine type. After you connect to + // the machine, you must mount the volume. + // + // Constraints: For M3 machines, you must specify machine store volumes in + // the block device mapping for the machine. When you launch an M3 machine, + // we ignore any machine store volumes specified in the block device mapping + // for the AMI. + // +optional + VirtualName *string `json:"virtualName,omitempty"` +} + +// EBSBlockDeviceSpec describes a block device for an EBS volume. +// https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EbsBlockDevice +type EBSBlockDeviceSpec struct { + // Indicates whether the EBS volume is deleted on machine termination. + // +optional + DeleteOnTermination *bool `json:"deleteOnTermination,omitempty"` + // Indicates whether the EBS volume is encrypted. Encrypted Amazon EBS volumes + // may only be attached to machines that support Amazon EBS encryption. + // +optional + Encrypted *bool `json:"encrypted,omitempty"` + // Indicates the KMS key that should be used to encrypt the Amazon EBS volume. + // +optional + KMSKey AWSResourceReference `json:"kmsKey,omitempty"` + // The number of I/O operations per second (IOPS) that the volume supports. + // For io1, this represents the number of IOPS that are provisioned for the + // volume.
For gp2, this represents the baseline performance of the volume and + // the rate at which the volume accumulates I/O credits for bursting. For more + // information about General Purpose SSD baseline performance, I/O credits, + // and bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // Minimal and maximal IOPS for io1 and gp2 are constrained. Please, check + // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html + // for precise boundaries for individual volumes. + // + // Condition: This parameter is required for requests to create io1 volumes; + // it is not used in requests to create gp2, st1, sc1, or standard volumes. + // +optional + Iops *int64 `json:"iops,omitempty"` + // The size of the volume, in GiB. + // + // Constraints: 1-16384 for General Purpose SSD (gp2), 4-16384 for Provisioned + // IOPS SSD (io1), 500-16384 for Throughput Optimized HDD (st1), 500-16384 for + // Cold HDD (sc1), and 1-1024 for Magnetic (standard) volumes. If you specify + // a snapshot, the volume size must be equal to or larger than the snapshot + // size. + // + // Default: If you're creating the volume from a snapshot and don't specify + // a volume size, the default is the snapshot size. + // +optional + VolumeSize *int64 `json:"volumeSize,omitempty"` + // The volume type: gp2, io1, st1, sc1, or standard. + // Default: standard + // +optional + VolumeType *string `json:"volumeType,omitempty"` +} + +// SpotMarketOptions defines the options available to a user when configuring +// Machines to run on Spot instances. +// Most users should provide an empty struct. +type SpotMarketOptions struct { + // The maximum price the user is willing to pay for their instances + // Default: On-Demand price + // +optional + MaxPrice *string `json:"maxPrice,omitempty"` +} + +type MetadataServiceAuthentication string + +const ( + // MetadataServiceAuthenticationRequired enforces sending of a signed token header with any instance metadata retrieval (GET) requests. + // Enforces IMDSv2 usage. + MetadataServiceAuthenticationRequired = "Required" + // MetadataServiceAuthenticationOptional allows IMDSv1 usage along with IMDSv2 + MetadataServiceAuthenticationOptional = "Optional" +) + +// MetadataServiceOptions defines the options available to a user when configuring +// Instance Metadata Service (IMDS) Options. +type MetadataServiceOptions struct { + // Authentication determines whether or not the host requires the use of authentication when interacting with the metadata service. + // When using authentication, this enforces v2 interaction method (IMDSv2) with the metadata service. + // When omitted, this means the user has no opinion and the value is left to the platform to choose a good + // default, which is subject to change over time. The current default is optional. + // At this point this field represents `HttpTokens` parameter from `InstanceMetadataOptionsRequest` structure in AWS EC2 API + // https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html + // +kubebuilder:validation:Enum=Required;Optional + // +optional + Authentication MetadataServiceAuthentication `json:"authentication,omitempty"` +} + +// AWSResourceReference is a reference to a specific AWS resource by ID, ARN, or filters. +// Only one of ID, ARN or Filters may be specified. Specifying more than one will result in +// a validation error. 
+type AWSResourceReference struct { + // ID of resource + // +optional + ID *string `json:"id,omitempty"` + // ARN of resource + // +optional + ARN *string `json:"arn,omitempty"` + // Filters is a set of filters used to identify a resource + // +optional + Filters []Filter `json:"filters,omitempty"` +} + +// Placement indicates where to create the instance in AWS +type Placement struct { + // Region is the region to use to create the instance + // +optional + Region string `json:"region,omitempty"` + // AvailabilityZone is the availability zone of the instance + // +optional + AvailabilityZone string `json:"availabilityZone,omitempty"` + // Tenancy indicates if instance should run on shared or single-tenant hardware. There are + // 3 supported options: default, dedicated and host. + // +optional + Tenancy InstanceTenancy `json:"tenancy,omitempty"` +} + +// Filter is a filter used to identify an AWS resource +type Filter struct { + // Name of the filter. Filter names are case-sensitive. + Name string `json:"name"` + // Values includes one or more filter values. Filter values are case-sensitive. + // +optional + Values []string `json:"values,omitempty"` +} + +// TagSpecification is the name/value pair for a tag +type TagSpecification struct { + // Name of the tag + Name string `json:"name"` + // Value of the tag + Value string `json:"value"` +} + +// AWSMachineProviderConfigList contains a list of AWSMachineProviderConfig +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type AWSMachineProviderConfigList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + Items []AWSMachineProviderConfig `json:"items"` +} + +// LoadBalancerReference is a reference to a load balancer on AWS. +type LoadBalancerReference struct { + Name string `json:"name"` + Type AWSLoadBalancerType `json:"type"` +} + +// AWSLoadBalancerType is the type of LoadBalancer to use when registering +// an instance with load balancers specified in LoadBalancerNames +type AWSLoadBalancerType string + +// InstanceTenancy indicates if instance should run on shared or single-tenant hardware. +type InstanceTenancy string + +const ( + // DefaultTenancy instance runs on shared hardware + DefaultTenancy InstanceTenancy = "default" + // DedicatedTenancy instance runs on single-tenant hardware + DedicatedTenancy InstanceTenancy = "dedicated" + // HostTenancy instance runs on a Dedicated Host, which is an isolated server with configurations that you can control. + HostTenancy InstanceTenancy = "host" +) + +// Possible values for AWSLoadBalancerType. Add to this list as other types +// of load balancer are supported by the actuator. +const ( + ClassicLoadBalancerType AWSLoadBalancerType = "classic" // AWS classic ELB + NetworkLoadBalancerType AWSLoadBalancerType = "network" // AWS Network Load Balancer (NLB) +) + +// AWSNetworkInterfaceType defines the network interface type of the +// AWS EC2 network interface. +type AWSNetworkInterfaceType string + +const ( + // AWSENANetworkInterfaceType is the default network interface type, + // the EC2 Elastic Network Adapter commonly used with EC2 instances. + // This should be used for standard network operations. + AWSENANetworkInterfaceType AWSNetworkInterfaceType = "ENA" + // AWSEFANetworkInterfaceType is the Elastic Fabric Adapter network interface type.
+ AWSEFANetworkInterfaceType AWSNetworkInterfaceType = "EFA" +) + +// AWSMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. +// It contains AWS-specific status information. +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type AWSMachineProviderStatus struct { + metav1.TypeMeta `json:",inline"` + // InstanceID is the instance ID of the machine created in AWS + // +optional + InstanceID *string `json:"instanceId,omitempty"` + // InstanceState is the state of the AWS instance for this machine + // +optional + InstanceState *string `json:"instanceState,omitempty"` + // Conditions is a set of conditions associated with the Machine to indicate + // errors or other status + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +}
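AWSMachineProviderConfig above is what ends up inlined in a Machine's providerSpec. A sketch of building the minimal required fields and serializing them for embedding; wrapping the JSON in a runtime.RawExtension is an assumption about how ProviderSpec.Value is populated (that type is not in this hunk), and the AMI ID and subnet tag are placeholders.

package example

import (
	"encoding/json"

	machinev1beta1 "github.com/openshift/api/machine/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// awsProviderSpecValue serializes a minimal AWS provider config.
func awsProviderSpecValue() (*runtime.RawExtension, error) {
	ami := "ami-0123456789abcdef0" // placeholder AMI ID
	cfg := machinev1beta1.AWSMachineProviderConfig{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "machine.openshift.io/v1beta1",
			Kind:       "AWSMachineProviderConfig",
		},
		AMI:          machinev1beta1.AWSResourceReference{ID: &ami},
		InstanceType: "m5.xlarge",
		Placement:    machinev1beta1.Placement{Region: "us-east-1", AvailabilityZone: "us-east-1a"},
		// Only one of ID, ARN or Filters may be set on a reference.
		Subnet: machinev1beta1.AWSResourceReference{
			Filters: []machinev1beta1.Filter{{Name: "tag:Name", Values: []string{"my-private-subnet"}}},
		},
	}
	raw, err := json.Marshal(&cfg)
	if err != nil {
		return nil, err
	}
	return &runtime.RawExtension{Raw: raw}, nil
}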
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go new file mode 100644 index 000000000..1d565e5d2 --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/types_azureprovider.go @@ -0,0 +1,568 @@ +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// SecurityEncryptionTypes represents the Encryption Type when the Azure Virtual Machine is a +// Confidential VM. +type SecurityEncryptionTypes string + +const ( + // SecurityEncryptionTypesVMGuestStateOnly disables OS disk confidential encryption. + SecurityEncryptionTypesVMGuestStateOnly SecurityEncryptionTypes = "VMGuestStateOnly" + // SecurityEncryptionTypesDiskWithVMGuestState enables OS disk confidential encryption with a + // platform-managed key (PMK) or a customer-managed key (CMK). + SecurityEncryptionTypesDiskWithVMGuestState SecurityEncryptionTypes = "DiskWithVMGuestState" +) + +// SecurityTypes represents the SecurityType of the virtual machine. +type SecurityTypes string + +const ( + // SecurityTypesConfidentialVM defines the SecurityType of the virtual machine as a Confidential VM. + SecurityTypesConfidentialVM SecurityTypes = "ConfidentialVM" + // SecurityTypesTrustedLaunch defines the SecurityType of the virtual machine as a Trusted Launch VM. + SecurityTypesTrustedLaunch SecurityTypes = "TrustedLaunch" +) + +// AzureMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field +// for an Azure virtual machine. It is used by the Azure machine actuator to create a single Machine. +// Required parameters such as location that are not specified by this configuration will be defaulted +// by the actuator. +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type AzureMachineProviderSpec struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // UserDataSecret contains a local reference to a secret that contains the + // UserData to apply to the instance + // +optional + UserDataSecret *corev1.SecretReference `json:"userDataSecret,omitempty"` + // CredentialsSecret is a reference to the secret with Azure credentials. + // +optional + CredentialsSecret *corev1.SecretReference `json:"credentialsSecret,omitempty"` + // Location is the region to use to create the instance + // +optional + Location string `json:"location,omitempty"` + // VMSize is the size of the VM to create. + // +optional + VMSize string `json:"vmSize,omitempty"` + // Image is the OS image to use to create the instance. + Image Image `json:"image"` + // OSDisk represents the parameters for creating the OS disk. + OSDisk OSDisk `json:"osDisk"` + // DataDisk specifies the parameters that are used to add one or more data disks to the machine. + // +optional + DataDisks []DataDisk `json:"dataDisks,omitempty"` + // SSHPublicKey is the public key to use to SSH to the virtual machine. + // +optional + SSHPublicKey string `json:"sshPublicKey,omitempty"` + // PublicIP if true a public IP will be used + PublicIP bool `json:"publicIP"` + // Tags is a list of tags to apply to the machine. + // +optional + Tags map[string]string `json:"tags,omitempty"` + // Network Security Group that needs to be attached to the machine's interface. + // No security group will be attached if empty. + // +optional + SecurityGroup string `json:"securityGroup,omitempty"` + // Application Security Groups that need to be attached to the machine's interface. + // No application security groups will be attached if zero-length. + // +optional + ApplicationSecurityGroups []string `json:"applicationSecurityGroups,omitempty"` + // Subnet to use for this instance + Subnet string `json:"subnet"` + // PublicLoadBalancer to use for this instance + // +optional + PublicLoadBalancer string `json:"publicLoadBalancer,omitempty"` + // InternalLoadBalancerName to use for this instance + // +optional + InternalLoadBalancer string `json:"internalLoadBalancer,omitempty"` + // NatRule to set inbound NAT rule of the load balancer + // +optional + NatRule *int64 `json:"natRule,omitempty"` + // ManagedIdentity to set managed identity name + // +optional + ManagedIdentity string `json:"managedIdentity,omitempty"` + // Vnet to set virtual network name + // +optional + Vnet string `json:"vnet,omitempty"` + // Availability Zone for the virtual machine. + // If nil, the virtual machine should be deployed to no zone + // +optional + Zone *string `json:"zone,omitempty"` + // NetworkResourceGroup is the resource group for the virtual machine's network + // +optional + NetworkResourceGroup string `json:"networkResourceGroup,omitempty"` + // ResourceGroup is the resource group for the virtual machine + // +optional + ResourceGroup string `json:"resourceGroup,omitempty"` + // SpotVMOptions allows the ability to specify whether the Machine should use a Spot VM + // +optional + SpotVMOptions *SpotVMOptions `json:"spotVMOptions,omitempty"` + // SecurityProfile specifies the Security profile settings for a virtual machine. + // +optional + SecurityProfile *SecurityProfile `json:"securityProfile,omitempty"` + // UltraSSDCapability enables or disables Azure UltraSSD capability for a virtual machine. + // This can be used to allow/disallow binding of Azure UltraSSD to the Machine both as Data Disks or via Persistent Volumes. + // This Azure feature is subject to a specific scope and certain limitations. + // More information on this can be found in the official Azure documentation for Ultra Disks: + // (https://docs.microsoft.com/en-us/azure/virtual-machines/disks-enable-ultra-ssd?tabs=azure-portal#ga-scope-and-limitations).
+ // + // When omitted, if at least one Data Disk of type UltraSSD is specified, the platform will automatically enable the capability. + // If a Persistent Volume backed by an UltraSSD is bound to a Pod on the Machine, when this field is omitted, the platform will *not* automatically enable the capability (unless already enabled by the presence of an UltraSSD as Data Disk). + // This may manifest in the Pod being stuck in `ContainerCreating` phase. + // This defaulting behaviour may be subject to change in the future. + // + // When set to "Enabled", if the capability is available for the Machine based on the scope and limitations described above, the capability will be set on the Machine. + // This will thus allow UltraSSD both as Data Disks and Persistent Volumes. + // If set to "Enabled" when the capability can't be available due to scope and limitations, the Machine will go into "Failed" state. + // + // When set to "Disabled", UltraSSDs will not be allowed either as Data Disks or as Persistent Volumes. + // In this case if any UltraSSDs are specified as Data Disks on a Machine, the Machine will go into a "Failed" state. + // If instead any UltraSSDs are backing the volumes (via Persistent Volumes) of any Pods scheduled on a Node which is backed by the Machine, the Pod may get stuck in `ContainerCreating` phase. + // + // +kubebuilder:validation:Enum:="Enabled";"Disabled" + // +optional + UltraSSDCapability AzureUltraSSDCapabilityState `json:"ultraSSDCapability,omitempty"` + // AcceleratedNetworking enables or disables Azure accelerated networking feature. + // Set to false by default. If true, then this will depend on whether the requested + // VMSize is supported. If set to true with an unsupported VMSize, Azure will return an error. + // +optional + AcceleratedNetworking bool `json:"acceleratedNetworking,omitempty"` + // AvailabilitySet specifies the availability set to use for this instance. + // The availability set should be precreated before using this field. + // +optional + AvailabilitySet string `json:"availabilitySet,omitempty"` + // Diagnostics configures the diagnostics settings for the virtual machine. + // This allows you to configure boot diagnostics such as capturing serial output from + // the virtual machine on boot. + // This is useful for debugging software-based launch issues. + // +optional + Diagnostics AzureDiagnostics `json:"diagnostics,omitempty"` +}
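With AzureMachineProviderSpec closed out above, a minimal sketch of filling it in. The resource names, image ID and VM size are placeholders, and ManagedDisk is left at its zero value for brevity (its parameters are defined later in this file).

package example

import (
	machinev1beta1 "github.com/openshift/api/machine/v1beta1"
)

// azureWorkerSpec sketches the provider spec for a single Azure worker VM.
func azureWorkerSpec() machinev1beta1.AzureMachineProviderSpec {
	return machinev1beta1.AzureMachineProviderSpec{
		Location: "eastus",
		VMSize:   "Standard_D4s_v3",
		// Placeholder image reference by resource ID.
		Image: machinev1beta1.Image{ResourceID: "/resourceGroups/rg/providers/Microsoft.Compute/images/worker"},
		OSDisk: machinev1beta1.OSDisk{
			OSType:     "Linux",
			DiskSizeGB: 128,
			// ManagedDisk is omitted here; defaulting is left to the actuator in this sketch.
		},
		Subnet:   "worker-subnet",
		PublicIP: false,
		Vnet:     "cluster-vnet",
	}
}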
+ +// SpotVMOptions defines the options relevant to running the Machine on Spot VMs +type SpotVMOptions struct { + // MaxPrice defines the maximum price the user is willing to pay for Spot VM instances + // +optional + MaxPrice *resource.Quantity `json:"maxPrice,omitempty"` +} + +// AzureDiagnostics is used to configure the diagnostic settings of the virtual machine. +type AzureDiagnostics struct { + // AzureBootDiagnostics configures the boot diagnostics settings for the virtual machine. + // This allows you to configure capturing serial output from the virtual machine on boot. + // This is useful for debugging software-based launch issues. + // + This is a pointer so that we can validate required fields only when the structure is + // + configured by the user. + // +optional + Boot *AzureBootDiagnostics `json:"boot,omitempty"` +} + +// AzureBootDiagnostics configures the boot diagnostics settings for the virtual machine. +// This allows you to configure capturing serial output from the virtual machine on boot. +// This is useful for debugging software-based launch issues. +// +union +type AzureBootDiagnostics struct { + // StorageAccountType determines if the storage account for storing the diagnostics data + // should be provisioned by Azure (AzureManaged) or by the customer (CustomerManaged). + // +kubebuilder:validation:Required + // +unionDiscriminator + StorageAccountType AzureBootDiagnosticsStorageAccountType `json:"storageAccountType"` + + // CustomerManaged provides reference to the customer managed storage account. + // +optional + CustomerManaged *AzureCustomerManagedBootDiagnostics `json:"customerManaged,omitempty"` +} + +// AzureCustomerManagedBootDiagnostics provides reference to a customer managed +// storage account. +type AzureCustomerManagedBootDiagnostics struct { + // StorageAccountURI is the URI of the customer managed storage account. + // The URI typically will be `https://<storageaccountname>.blob.core.windows.net/` + // but may differ if you are using Azure DNS zone endpoints. + // You can find the correct endpoint by looking for the Blob Primary Endpoint in the + // endpoints tab in the Azure console. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^https://` + // +kubebuilder:validation:MaxLength=1024 + StorageAccountURI string `json:"storageAccountURI"` +} + +// AzureBootDiagnosticsStorageAccountType defines the list of valid storage account types +// for the boot diagnostics. +// +kubebuilder:validation:Enum:="AzureManaged";"CustomerManaged" +type AzureBootDiagnosticsStorageAccountType string + +const ( + // AzureManagedAzureDiagnosticsStorage is used to determine that the diagnostics storage account + // should be provisioned by Azure. + AzureManagedAzureDiagnosticsStorage AzureBootDiagnosticsStorageAccountType = "AzureManaged" + + // CustomerManagedAzureDiagnosticsStorage is used to determine that the diagnostics storage account + // should be provisioned by the Customer. + CustomerManagedAzureDiagnosticsStorage AzureBootDiagnosticsStorageAccountType = "CustomerManaged" +) + +// AzureMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. +// It contains Azure-specific status information. +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type AzureMachineProviderStatus struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // VMID is the ID of the virtual machine created in Azure. + // +optional + VMID *string `json:"vmId,omitempty"` + // VMState is the provisioning state of the Azure virtual machine. + // +optional + VMState *AzureVMState `json:"vmState,omitempty"` + // Conditions is a set of conditions associated with the Machine to indicate + // errors or other status. + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// AzureVMState describes the state of an Azure virtual machine. +type AzureVMState string + +const ( + // ProvisioningState related values + // VMStateCreating ... + VMStateCreating = AzureVMState("Creating") + // VMStateDeleting ... + VMStateDeleting = AzureVMState("Deleting") + // VMStateFailed ... + VMStateFailed = AzureVMState("Failed") + // VMStateMigrating ... + VMStateMigrating = AzureVMState("Migrating") + // VMStateSucceeded ... + VMStateSucceeded = AzureVMState("Succeeded") + // VMStateUpdating ... + VMStateUpdating = AzureVMState("Updating") + + // PowerState related values + // VMStateStarting ...
+	VMStateStarting = AzureVMState("Starting")
+	// VMStateRunning ...
+	VMStateRunning = AzureVMState("Running")
+	// VMStateStopping ...
+	VMStateStopping = AzureVMState("Stopping")
+	// VMStateStopped ...
+	VMStateStopped = AzureVMState("Stopped")
+	// VMStateDeallocating ...
+	VMStateDeallocating = AzureVMState("Deallocating")
+	// VMStateDeallocated ...
+	VMStateDeallocated = AzureVMState("Deallocated")
+	// VMStateUnknown ...
+	VMStateUnknown = AzureVMState("Unknown")
+)
+
+// Image is a mirror of the Azure SDK's compute.ImageReference.
+type Image struct {
+	// Publisher is the name of the organization that created the image.
+	Publisher string `json:"publisher"`
+	// Offer specifies the name of a group of related images created by the publisher.
+	// For example, UbuntuServer, WindowsServer
+	Offer string `json:"offer"`
+	// SKU specifies an instance of an offer, such as a major release of a distribution.
+	// For example, 18.04-LTS, 2019-Datacenter
+	SKU string `json:"sku"`
+	// Version specifies the version of an image sku. The allowed formats
+	// are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers.
+	// Specify 'latest' to use the latest version of an image available at deploy time.
+	// Even if you use 'latest', the VM image will not automatically update after deploy
+	// time even if a new version becomes available.
+	Version string `json:"version"`
+	// ResourceID specifies an image to use by ID.
+	ResourceID string `json:"resourceID"`
+	// Type identifies the source of the image and related information, such as purchase plans.
+	// Valid values are "ID", "MarketplaceWithPlan", "MarketplaceNoPlan", and omitted, which
+	// means no opinion and the platform chooses a good default which may change over time.
+	// Currently that default is "MarketplaceNoPlan" if publisher data is supplied, or "ID" if not.
+	// For more information about purchase plans, see:
+	// https://docs.microsoft.com/en-us/azure/virtual-machines/linux/cli-ps-findimage#check-the-purchase-plan-information
+	// +optional
+	Type AzureImageType `json:"type,omitempty"`
+}
+
+// AzureImageType provides an enumeration for the valid image types.
+type AzureImageType string
+
+const (
+	// AzureImageTypeID specifies that the image should be referenced by its resource ID.
+	AzureImageTypeID AzureImageType = "ID"
+	// AzureImageTypeMarketplaceNoPlan identifies marketplace images that do not require a purchase plan.
+	AzureImageTypeMarketplaceNoPlan AzureImageType = "MarketplaceNoPlan"
+	// AzureImageTypeMarketplaceWithPlan identifies marketplace images that require a purchase plan. Upstream these images are referred to as "ThirdParty."
+	AzureImageTypeMarketplaceWithPlan AzureImageType = "MarketplaceWithPlan"
+)
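As a hedged illustration of the Image fields (the publisher, offer, SKU and version values are illustrative, not a recommendation), a marketplace image without a purchase plan could be referenced like this:

    package main

    import (
        "fmt"

        machinev1beta1 "github.com/openshift/api/machine/v1beta1"
    )

    func main() {
        // A marketplace image referenced by publisher data; since publisher
        // data is supplied, the platform would default Type to
        // "MarketplaceNoPlan" even if it were omitted.
        img := machinev1beta1.Image{
            Publisher: "Canonical",
            Offer:     "UbuntuServer",
            SKU:       "18.04-LTS",
            Version:   "latest",
            Type:      machinev1beta1.AzureImageTypeMarketplaceNoPlan,
        }
        fmt.Printf("%+v\n", img)
    }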
+// OSDisk represents the OS disk of an Azure virtual machine.
+type OSDisk struct {
+	// OSType is the operating system type of the OS disk. Possible values include "Linux" and "Windows".
+	OSType string `json:"osType"`
+	// ManagedDisk specifies the Managed Disk parameters for the OS disk.
+	ManagedDisk OSDiskManagedDiskParameters `json:"managedDisk"`
+	// DiskSizeGB is the size in GB to assign to the OS disk.
+	DiskSizeGB int32 `json:"diskSizeGB"`
+	// DiskSettings describe ephemeral disk settings for the OS disk.
+	// +optional
+	DiskSettings DiskSettings `json:"diskSettings,omitempty"`
+	// CachingType specifies the caching requirements.
+	// Possible values include: 'None', 'ReadOnly', 'ReadWrite'.
+	// Empty value means no opinion and the platform chooses a default, which is subject to change over
+	// time. Currently the default is `None`.
+	// +optional
+	// +kubebuilder:validation:Enum=None;ReadOnly;ReadWrite
+	CachingType string `json:"cachingType,omitempty"`
+}
+
+// DataDisk specifies the parameters that are used to add one or more data disks to the machine.
+// A Data Disk is a managed disk that's attached to a virtual machine to store application data.
+// It differs from an OS Disk as it doesn't come with a pre-installed OS, and it cannot contain the boot volume.
+// It is registered as a SCSI drive and labeled with the chosen `lun`, e.g. for `lun: 0` the raw disk device will be available at `/dev/disk/azure/scsi1/lun0`.
+//
+// As the Data Disk device is attached raw to the virtual machine, it will need to be partitioned, formatted with a filesystem and mounted in order for it to be usable.
+// This can be done by creating a custom userdata Secret with custom Ignition configuration to achieve the desired initialization.
+// At this stage the previously defined `lun` is to be used as the "device" key for referencing the raw disk device to be initialized.
+// Once the custom userdata Secret has been created, it can be referenced in the Machine's `.providerSpec.userDataSecret`.
+// For further guidance and examples, please refer to the official OpenShift docs.
+type DataDisk struct {
+	// NameSuffix is the suffix to be appended to the machine name to generate the disk name.
+	// Each disk name will be in format <machineName>_<nameSuffix>.
+	// NameSuffix must start and finish with an alphanumeric character and can only contain letters, numbers, underscores, periods or hyphens.
+	// The overall disk name must not exceed 80 chars in length.
+	// +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9](?:[\w\.-]*[a-zA-Z0-9])?$`
+	// +kubebuilder:validation:MaxLength:=78
+	// +kubebuilder:validation:Required
+	NameSuffix string `json:"nameSuffix"`
+	// DiskSizeGB is the size in GB to assign to the data disk.
+	// +kubebuilder:validation:Minimum=4
+	// +kubebuilder:validation:Required
+	DiskSizeGB int32 `json:"diskSizeGB"`
+	// ManagedDisk specifies the Managed Disk parameters for the data disk.
+	// Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+	// Currently the default is a ManagedDisk with storageAccountType: "Premium_LRS" and diskEncryptionSet.id: "Default".
+	// +optional
+	ManagedDisk DataDiskManagedDiskParameters `json:"managedDisk,omitempty"`
+	// Lun specifies the logical unit number of the data disk.
+	// This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM.
+	// This value is also needed for referencing the data disk devices within userdata to perform disk initialization through Ignition (e.g. partition/format/mount).
+	// The value must be between 0 and 63.
+	// +kubebuilder:validation:Minimum=0
+	// +kubebuilder:validation:Maximum=63
+	// +kubebuilder:validation:Required
+	Lun int32 `json:"lun,omitempty"`
+	// CachingType specifies the caching requirements.
+	// Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+	// Currently the default is CachingTypeNone.
+	// +optional
+	// +kubebuilder:validation:Enum=None;ReadOnly;ReadWrite
+	CachingType CachingTypeOption `json:"cachingType,omitempty"`
+	// DeletionPolicy specifies the data disk deletion policy upon Machine deletion.
+	// Possible values are "Delete", "Detach".
+	// When "Delete" is used, the data disk is deleted when the Machine is deleted.
+	// When "Detach" is used, the data disk is detached from the Machine and retained when the Machine is deleted.
+	// +kubebuilder:validation:Enum=Delete;Detach
+	// +kubebuilder:validation:Required
+	DeletionPolicy DiskDeletionPolicyType `json:"deletionPolicy"`
+}
+
+// DiskDeletionPolicyType defines the possible values for DeletionPolicy.
+type DiskDeletionPolicyType string
+
+// These are the valid DiskDeletionPolicyType values.
+const (
+	// DiskDeletionPolicyTypeDelete means the DiskDeletionPolicyType is "Delete".
+	DiskDeletionPolicyTypeDelete DiskDeletionPolicyType = "Delete"
+	// DiskDeletionPolicyTypeDetach means the DiskDeletionPolicyType is "Detach".
+	DiskDeletionPolicyTypeDetach DiskDeletionPolicyType = "Detach"
+)
+
+// CachingTypeOption defines the different values for a CachingType.
+type CachingTypeOption string
+
+// These are the valid CachingTypeOption values.
+const (
+	// CachingTypeReadOnly means the CachingType is "ReadOnly".
+	CachingTypeReadOnly CachingTypeOption = "ReadOnly"
+	// CachingTypeReadWrite means the CachingType is "ReadWrite".
+	CachingTypeReadWrite CachingTypeOption = "ReadWrite"
+	// CachingTypeNone means the CachingType is "None".
+	CachingTypeNone CachingTypeOption = "None"
+)
+
+// DiskSettings describe ephemeral disk settings for the OS disk.
+type DiskSettings struct {
+	// EphemeralStorageLocation enables ephemeral OS when set to 'Local'.
+	// Possible values include: 'Local'.
+	// See https://docs.microsoft.com/en-us/azure/virtual-machines/ephemeral-os-disks for full details.
+	// Empty value means no opinion and the platform chooses a default, which is subject to change over
+	// time. Currently the default is that disks are saved to remote Azure storage.
+	// +optional
+	// +kubebuilder:validation:Enum=Local
+	EphemeralStorageLocation string `json:"ephemeralStorageLocation,omitempty"`
+}
+
+// OSDiskManagedDiskParameters are the parameters of an OSDisk managed disk.
+type OSDiskManagedDiskParameters struct {
+	// StorageAccountType is the storage account type to use.
+	// Possible values include "Standard_LRS", "Premium_LRS".
+	StorageAccountType string `json:"storageAccountType"`
+	// DiskEncryptionSet is the disk encryption set properties.
+	// +optional
+	DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"`
+	// securityProfile specifies the security profile for the managed disk.
+	// +optional
+	SecurityProfile VMDiskSecurityProfile `json:"securityProfile,omitempty"`
+}
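Pulling the data disk fields above together, a minimal, non-normative sketch might look like the following (the suffix, size and storage account type are illustrative; DataDiskManagedDiskParameters and its constants are defined just below):

    package main

    import (
        "fmt"

        machinev1beta1 "github.com/openshift/api/machine/v1beta1"
    )

    func main() {
        // A 128 GiB Premium_LRS data disk, surfaced at
        // /dev/disk/azure/scsi1/lun0 and deleted together with the Machine.
        disk := machinev1beta1.DataDisk{
            NameSuffix:     "etcd",
            DiskSizeGB:     128,
            Lun:            0,
            CachingType:    machinev1beta1.CachingTypeNone,
            DeletionPolicy: machinev1beta1.DiskDeletionPolicyTypeDelete,
            ManagedDisk: machinev1beta1.DataDiskManagedDiskParameters{
                StorageAccountType: machinev1beta1.StorageAccountPremiumLRS,
            },
        }
        fmt.Printf("%+v\n", disk)
    }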
+
+// VMDiskSecurityProfile specifies the security profile settings for the managed disk.
+// It can be set only for Confidential VMs.
+type VMDiskSecurityProfile struct {
+	// diskEncryptionSet specifies the customer managed disk encryption set resource id for the
+	// managed disk that is used for Customer Managed Key encrypted ConfidentialVM OS Disk and
+	// VMGuest blob.
+	// +optional
+	DiskEncryptionSet DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"`
+	// securityEncryptionType specifies the encryption type of the managed disk.
+	// It is set to DiskWithVMGuestState to encrypt the managed disk along with the VMGuestState
+	// blob, and to VMGuestStateOnly to encrypt the VMGuestState blob only.
+	// When set to VMGuestStateOnly, the vTPM should be enabled.
+	// When set to DiskWithVMGuestState, both SecureBoot and vTPM should be enabled.
+	// If the above conditions are not fulfilled, the VM will not be created and the respective error
+	// will be returned.
+	// It can be set only for Confidential VMs. Confidential VMs are defined by their
+	// SecurityProfile.SecurityType being set to ConfidentialVM, the SecurityEncryptionType of their
+	// OS disk being set to one of the allowed values and by enabling the respective
+	// SecurityProfile.UEFISettings of the VM (i.e. vTPM and SecureBoot), depending on the selected
+	// SecurityEncryptionType.
+	// For further details on Azure Confidential VMs, please refer to the respective documentation:
+	// https://learn.microsoft.com/azure/confidential-computing/confidential-vm-overview
+	// +kubebuilder:validation:Enum=VMGuestStateOnly;DiskWithVMGuestState
+	// +optional
+	SecurityEncryptionType SecurityEncryptionTypes `json:"securityEncryptionType,omitempty"`
+}
+
+// DataDiskManagedDiskParameters are the parameters of a DataDisk managed disk.
+type DataDiskManagedDiskParameters struct {
+	// StorageAccountType is the storage account type to use.
+	// Possible values include "Standard_LRS", "Premium_LRS" and "UltraSSD_LRS".
+	// +kubebuilder:validation:Enum=Standard_LRS;Premium_LRS;UltraSSD_LRS
+	StorageAccountType StorageAccountType `json:"storageAccountType"`
+	// DiskEncryptionSet is the disk encryption set properties.
+	// Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+	// Currently the default is a DiskEncryptionSet with id: "Default".
+	// +optional
+	DiskEncryptionSet *DiskEncryptionSetParameters `json:"diskEncryptionSet,omitempty"`
+}
+
+// StorageAccountType defines the different storage types to use for a ManagedDisk.
+type StorageAccountType string
+
+// These are the valid StorageAccountType types.
+const (
+	// "StorageAccountStandardLRS" means the Standard_LRS storage type.
+	StorageAccountStandardLRS StorageAccountType = "Standard_LRS"
+	// "StorageAccountPremiumLRS" means the Premium_LRS storage type.
+	StorageAccountPremiumLRS StorageAccountType = "Premium_LRS"
+	// "StorageAccountUltraSSDLRS" means the UltraSSD_LRS storage type.
+	StorageAccountUltraSSDLRS StorageAccountType = "UltraSSD_LRS"
+)
+
+// DiskEncryptionSetParameters is the disk encryption set properties.
+type DiskEncryptionSetParameters struct {
+	// ID is the disk encryption set ID.
+	// Empty value means no opinion and the platform chooses a default, which is subject to change over time.
+	// Currently the default is: "Default".
+	// +optional
+	ID string `json:"id,omitempty"`
+}
+
+// SecurityProfile specifies the Security profile settings for a
+// virtual machine or virtual machine scale set.
+type SecurityProfile struct {
+	// encryptionAtHost indicates whether Host Encryption should be enabled or disabled for a virtual
+	// machine or virtual machine scale set.
+	// This should be disabled when SecurityEncryptionType is set to DiskWithVMGuestState.
+	// Default is disabled.
+	// +optional
+	EncryptionAtHost *bool `json:"encryptionAtHost,omitempty"`
+	// settings specify the security type and the UEFI settings of the virtual machine. This field can
+	// be set for Confidential VMs and Trusted Launch for VMs.
+	// +optional
+	Settings SecuritySettings `json:"settings,omitempty"`
+}
+
+// SecuritySettings define the security type and the UEFI settings of the virtual machine.
+// +union
+type SecuritySettings struct {
+	// securityType specifies the SecurityType of the virtual machine. It must be set to one of the
+	// allowed values to enable UEFISettings; UEFISettings will not be enabled unless this property is set.
+	// +kubebuilder:validation:Enum=ConfidentialVM;TrustedLaunch
+	// +kubebuilder:validation:Required
+	// +unionDiscriminator
+	SecurityType SecurityTypes `json:"securityType,omitempty"`
+	// confidentialVM specifies the security configuration of the virtual machine.
+	// For more information regarding Confidential VMs, please refer to:
+	// https://learn.microsoft.com/azure/confidential-computing/confidential-vm-overview
+	// +optional
+	ConfidentialVM *ConfidentialVM `json:"confidentialVM,omitempty"`
+	// trustedLaunch specifies the security configuration of the virtual machine.
+	// For more information regarding TrustedLaunch for VMs, please refer to:
+	// https://learn.microsoft.com/azure/virtual-machines/trusted-launch
+	// +optional
+	TrustedLaunch *TrustedLaunch `json:"trustedLaunch,omitempty"`
+}
+
+// ConfidentialVM defines the UEFI settings for the virtual machine.
+type ConfidentialVM struct {
+	// uefiSettings specifies the security settings like secure boot and vTPM used while creating the virtual machine.
+	// +kubebuilder:validation:Required
+	UEFISettings UEFISettings `json:"uefiSettings,omitempty"`
+}
+
+// TrustedLaunch defines the UEFI settings for the virtual machine.
+type TrustedLaunch struct {
+	// uefiSettings specifies the security settings like secure boot and vTPM used while creating the virtual machine.
+	// +kubebuilder:validation:Required
+	UEFISettings UEFISettings `json:"uefiSettings,omitempty"`
+}
+
+// UEFISettings specifies the security settings like secure boot and vTPM used while creating the
+// virtual machine.
+type UEFISettings struct {
+	// secureBoot specifies whether secure boot should be enabled on the virtual machine.
+	// Secure Boot verifies the digital signature of all boot components and halts the boot process if
+	// signature verification fails.
+	// If omitted, the platform chooses a default, which is subject to change over time; currently that default is disabled.
+	// +kubebuilder:validation:Enum=Enabled;Disabled
+	// +optional
+	SecureBoot SecureBootPolicy `json:"secureBoot,omitempty"`
+	// virtualizedTrustedPlatformModule specifies whether vTPM should be enabled on the virtual machine.
+	// When enabled, the virtualized trusted platform module measurements are used to create a known good boot integrity policy baseline.
+	// The integrity policy baseline is used for comparison with measurements from subsequent VM boots to determine if anything has changed.
+	// This is required to be enabled if SecurityEncryptionType is defined.
+	// If omitted, the platform chooses a default, which is subject to change over time; currently that default is disabled.
+	// +kubebuilder:validation:Enum=Enabled;Disabled
+	// +optional
+	VirtualizedTrustedPlatformModule VirtualizedTrustedPlatformModulePolicy `json:"virtualizedTrustedPlatformModule,omitempty"`
+}
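A short, non-normative sketch of a Trusted Launch configuration follows. The SecurityTypes constants live elsewhere in this package, so a plain conversion from the enum value "TrustedLaunch" (taken from the +kubebuilder marker above) is used; the SecureBootPolicy and VirtualizedTrustedPlatformModulePolicy constants are defined further below:

    package main

    import (
        "fmt"

        machinev1beta1 "github.com/openshift/api/machine/v1beta1"
    )

    func main() {
        // Trusted Launch with both Secure Boot and the vTPM enabled.
        settings := machinev1beta1.SecuritySettings{
            SecurityType: machinev1beta1.SecurityTypes("TrustedLaunch"),
            TrustedLaunch: &machinev1beta1.TrustedLaunch{
                UEFISettings: machinev1beta1.UEFISettings{
                    SecureBoot:                       machinev1beta1.SecureBootPolicyEnabled,
                    VirtualizedTrustedPlatformModule: machinev1beta1.VirtualizedTrustedPlatformModulePolicyEnabled,
                },
            },
        }
        fmt.Printf("%+v\n", settings)
    }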
+
+// AzureUltraSSDCapabilityState defines the different states of an UltraSSDCapability.
+type AzureUltraSSDCapabilityState string
+
+// These are the valid AzureUltraSSDCapabilityState states.
+const (
+	// "AzureUltraSSDCapabilityEnabled" means the Azure UltraSSDCapability is Enabled.
+	AzureUltraSSDCapabilityEnabled AzureUltraSSDCapabilityState = "Enabled"
+	// "AzureUltraSSDCapabilityDisabled" means the Azure UltraSSDCapability is Disabled.
+	AzureUltraSSDCapabilityDisabled AzureUltraSSDCapabilityState = "Disabled"
+)
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go
new file mode 100644
index 000000000..86e62f9ec
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1beta1/types_gcpprovider.go
@@ -0,0 +1,284 @@
+package v1beta1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// GCPHostMaintenanceType is a type representing acceptable values for the OnHostMaintenance field in GCPMachineProviderSpec.
+type GCPHostMaintenanceType string
+
+const (
+	// MigrateHostMaintenanceType [default] - causes Compute Engine to live migrate an instance when there is a maintenance event.
+	MigrateHostMaintenanceType GCPHostMaintenanceType = "Migrate"
+	// TerminateHostMaintenanceType - stops an instance instead of migrating it.
+	TerminateHostMaintenanceType GCPHostMaintenanceType = "Terminate"
+)
+
+// GCPRestartPolicyType is a type representing acceptable values for the RestartPolicy field in GCPMachineProviderSpec.
+type GCPRestartPolicyType string
+
+const (
+	// RestartPolicyAlways restarts an instance if it crashes or the underlying infrastructure provider stops the instance as part of a maintenance event.
+	RestartPolicyAlways GCPRestartPolicyType = "Always"
+	// RestartPolicyNever does not restart an instance if it crashes or the underlying infrastructure provider stops the instance as part of a maintenance event.
+	RestartPolicyNever GCPRestartPolicyType = "Never"
+)
+
+// SecureBootPolicy represents the secure boot configuration for the GCP machine.
+type SecureBootPolicy string
+
+const (
+	// SecureBootPolicyEnabled enables the secure boot configuration for the GCP machine.
+	SecureBootPolicyEnabled SecureBootPolicy = "Enabled"
+	// SecureBootPolicyDisabled disables the secure boot configuration for the GCP machine.
+	SecureBootPolicyDisabled SecureBootPolicy = "Disabled"
+)
+
+// VirtualizedTrustedPlatformModulePolicy represents the virtualized trusted platform module configuration for the GCP machine.
+type VirtualizedTrustedPlatformModulePolicy string
+
+const (
+	// VirtualizedTrustedPlatformModulePolicyEnabled enables the virtualized trusted platform module configuration for the GCP machine.
+	VirtualizedTrustedPlatformModulePolicyEnabled VirtualizedTrustedPlatformModulePolicy = "Enabled"
+	// VirtualizedTrustedPlatformModulePolicyDisabled disables the virtualized trusted platform module configuration for the GCP machine.
+	VirtualizedTrustedPlatformModulePolicyDisabled VirtualizedTrustedPlatformModulePolicy = "Disabled"
+)
+
+// IntegrityMonitoringPolicy represents the integrity monitoring configuration for the GCP machine.
+type IntegrityMonitoringPolicy string
+
+const (
+	// IntegrityMonitoringPolicyEnabled enables integrity monitoring for the GCP machine.
+	IntegrityMonitoringPolicyEnabled IntegrityMonitoringPolicy = "Enabled"
+	// IntegrityMonitoringPolicyDisabled disables integrity monitoring for the GCP machine.
+	IntegrityMonitoringPolicyDisabled IntegrityMonitoringPolicy = "Disabled"
+)
+
+// ConfidentialComputePolicy represents the confidential compute configuration for the GCP machine.
+type ConfidentialComputePolicy string
+
+const (
+	// ConfidentialComputePolicyEnabled enables confidential compute for the GCP machine.
+	ConfidentialComputePolicyEnabled ConfidentialComputePolicy = "Enabled"
+	// ConfidentialComputePolicyDisabled disables confidential compute for the GCP machine.
+	ConfidentialComputePolicyDisabled ConfidentialComputePolicy = "Disabled"
+)
+
+// GCPMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field
+// for a GCP virtual machine. It is used by the GCP machine actuator to create a single Machine.
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type GCPMachineProviderSpec struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// UserDataSecret contains a local reference to a secret that contains the
+	// UserData to apply to the instance.
+	// +optional
+	UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret,omitempty"`
+	// CredentialsSecret is a reference to the secret with GCP credentials.
+	// +optional
+	CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret,omitempty"`
+	// CanIPForward allows this instance to send and receive packets with non-matching destination or source IPs.
+	// This is required if you plan to use this instance to forward routes.
+	CanIPForward bool `json:"canIPForward"`
+	// DeletionProtection determines whether the resource should be protected against deletion.
+	DeletionProtection bool `json:"deletionProtection"`
+	// Disks is a list of disks to be attached to the VM.
+	// +optional
+	Disks []*GCPDisk `json:"disks,omitempty"`
+	// Labels is a list of labels to apply to the VM.
+	// +optional
+	Labels map[string]string `json:"labels,omitempty"`
+	// Metadata is a list of metadata key/value pairs to apply to the VM.
+	// +optional
+	Metadata []*GCPMetadata `json:"gcpMetadata,omitempty"`
+	// NetworkInterfaces is a list of network interfaces to be attached to the VM.
+	// +optional
+	NetworkInterfaces []*GCPNetworkInterface `json:"networkInterfaces,omitempty"`
+	// ServiceAccounts is a list of GCP service accounts to be used by the VM.
+	ServiceAccounts []GCPServiceAccount `json:"serviceAccounts"`
+	// Tags is a list of tags to apply to the VM.
+	Tags []string `json:"tags,omitempty"`
+	// TargetPools are used for network TCP/UDP load balancing. A target pool references member instances,
+	// an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool.
+	// +optional
+	TargetPools []string `json:"targetPools,omitempty"`
+	// MachineType is the machine type to use for the VM.
+	MachineType string `json:"machineType"`
+	// Region is the region in which the GCP machine provider will create the VM.
+	Region string `json:"region"`
+	// Zone is the zone in which the GCP machine provider will create the VM.
+	Zone string `json:"zone"`
+	// ProjectID is the project in which the GCP machine provider will create the VM.
+	// +optional
+	ProjectID string `json:"projectID,omitempty"`
+	// GPUs is a list of GPUs to be attached to the VM.
+	// +optional
+	GPUs []GCPGPUConfig `json:"gpus,omitempty"`
+	// Preemptible indicates whether the created instance is preemptible.
+	// +optional
+	Preemptible bool `json:"preemptible,omitempty"`
+	// OnHostMaintenance determines the behavior when a maintenance event occurs that might cause the instance to reboot.
+	// This is required to be set to "Terminate" if you want to provision machines with attached GPUs.
+	// Otherwise, allowed values are "Migrate" and "Terminate".
+	// If omitted, the platform chooses a default, which is subject to change over time; currently that default is "Migrate".
+	// +kubebuilder:validation:Enum=Migrate;Terminate;
+	// +optional
+	OnHostMaintenance GCPHostMaintenanceType `json:"onHostMaintenance,omitempty"`
+	// RestartPolicy determines the behavior when an instance crashes or the underlying infrastructure provider stops the instance as part of a maintenance event (default "Always").
+	// Cannot be "Always" with preemptible instances.
+	// Otherwise, allowed values are "Always" and "Never".
+	// If omitted, the platform chooses a default, which is subject to change over time; currently that default is "Always".
+	// RestartPolicy represents AutomaticRestart in the GCP compute API.
+	// +kubebuilder:validation:Enum=Always;Never;
+	// +optional
+	RestartPolicy GCPRestartPolicyType `json:"restartPolicy,omitempty"`
+
+	// ShieldedInstanceConfig is the Shielded VM configuration for the VM.
+	// +optional
+	ShieldedInstanceConfig GCPShieldedInstanceConfig `json:"shieldedInstanceConfig,omitempty"`
+
+	// confidentialCompute defines whether the instance should have confidential compute enabled.
+	// If enabled, OnHostMaintenance is required to be set to "Terminate".
+	// If omitted, the platform chooses a default, which is subject to change over time; currently that default is false.
+	// +kubebuilder:validation:Enum=Enabled;Disabled
+	// +optional
+	ConfidentialCompute ConfidentialComputePolicy `json:"confidentialCompute,omitempty"`
+}
+
+// GCPDisk describes disks for GCP.
+type GCPDisk struct {
+	// AutoDelete indicates if the disk will be auto-deleted when the instance is deleted (default false).
+	AutoDelete bool `json:"autoDelete"`
+	// Boot indicates if this is a boot disk (default false).
+	Boot bool `json:"boot"`
+	// SizeGB is the size of the disk (in GB).
+	SizeGB int64 `json:"sizeGb"`
+	// Type is the type of the disk (eg: pd-standard).
+	Type string `json:"type"`
+	// Image is the source image to create this disk.
+	Image string `json:"image"`
+	// Labels is a list of labels to apply to the disk.
+	Labels map[string]string `json:"labels"`
+	// EncryptionKey is the customer-supplied encryption key of the disk.
+	// +optional
+	EncryptionKey *GCPEncryptionKeyReference `json:"encryptionKey,omitempty"`
+}
+
+// GCPMetadata describes metadata for GCP.
+type GCPMetadata struct {
+	// Key is the metadata key.
+	Key string `json:"key"`
+	// Value is the metadata value.
+	Value *string `json:"value"`
+}
+
+// GCPNetworkInterface describes network interfaces for GCP.
+type GCPNetworkInterface struct {
+	// PublicIP indicates, if true, that a public IP will be used.
+	PublicIP bool `json:"publicIP,omitempty"`
+	// Network is the network name.
+	Network string `json:"network,omitempty"`
+	// ProjectID is the project in which the GCP machine provider will create the VM.
+	ProjectID string `json:"projectID,omitempty"`
+	// Subnetwork is the subnetwork name.
+	Subnetwork string `json:"subnetwork,omitempty"`
+}
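A minimal, non-normative sketch of a GCPMachineProviderSpec follows. The machine type, region, zone, disk size and the image path are illustrative only; GCPServiceAccount and GCPShieldedInstanceConfig are defined below:

    package main

    import (
        "fmt"

        machinev1beta1 "github.com/openshift/api/machine/v1beta1"
    )

    func main() {
        // One auto-deleted boot disk, Secure Boot enabled, and live
        // migration on host maintenance events.
        spec := machinev1beta1.GCPMachineProviderSpec{
            MachineType: "n1-standard-4",
            Region:      "us-central1",
            Zone:        "us-central1-a",
            Disks: []*machinev1beta1.GCPDisk{{
                AutoDelete: true,
                Boot:       true,
                SizeGB:     128,
                Type:       "pd-ssd",
                Image:      "projects/example/global/images/example-image", // hypothetical
            }},
            ServiceAccounts: []machinev1beta1.GCPServiceAccount{{
                Email:  "default",
                Scopes: []string{"https://www.googleapis.com/auth/cloud-platform"},
            }},
            OnHostMaintenance: machinev1beta1.MigrateHostMaintenanceType,
            ShieldedInstanceConfig: machinev1beta1.GCPShieldedInstanceConfig{
                SecureBoot: machinev1beta1.SecureBootPolicyEnabled,
            },
        }
        fmt.Printf("%+v\n", spec)
    }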
+
+// GCPServiceAccount describes service accounts for GCP.
+type GCPServiceAccount struct {
+	// Email is the service account email.
+	Email string `json:"email"`
+	// Scopes is the list of scopes to be assigned to the service account.
+	Scopes []string `json:"scopes"`
+}
+
+// GCPEncryptionKeyReference describes the encryptionKey to use for a disk's encryption.
+type GCPEncryptionKeyReference struct {
+	// KMSKey is the reference to the customer managed KMS key to use for disk encryption.
+	// +optional
+	KMSKey *GCPKMSKeyReference `json:"kmsKey,omitempty"`
+	// KMSKeyServiceAccount is the service account being used for the
+	// encryption request for the given KMS key. If absent, the Compute
+	// Engine default service account is used.
+	// See https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account
+	// for details on the default service account.
+	// +optional
+	KMSKeyServiceAccount string `json:"kmsKeyServiceAccount,omitempty"`
+}
+
+// GCPKMSKeyReference gathers required fields for looking up a GCP KMS Key.
+type GCPKMSKeyReference struct {
+	// Name is the name of the customer managed encryption key to be used for the disk encryption.
+	Name string `json:"name"`
+	// KeyRing is the name of the KMS Key Ring which the KMS Key belongs to.
+	KeyRing string `json:"keyRing"`
+	// ProjectID is the ID of the Project in which the KMS Key Ring exists.
+	// Defaults to the VM ProjectID if not set.
+	// +optional
+	ProjectID string `json:"projectID,omitempty"`
+	// Location is the GCP location in which the Key Ring exists.
+	Location string `json:"location"`
+}
+
+// GCPGPUConfig describes the type and count of GPUs attached to the instance on GCP.
+type GCPGPUConfig struct {
+	// Count is the number of GPUs to be attached to an instance.
+	Count int32 `json:"count"`
+	// Type is the type of GPU to be attached to an instance.
+	// Supported GPU types are: nvidia-tesla-k80, nvidia-tesla-p100, nvidia-tesla-v100, nvidia-tesla-p4, nvidia-tesla-t4
+	// +kubebuilder:validation:Pattern=`^nvidia-tesla-(k80|p100|v100|p4|t4)$`
+	Type string `json:"type"`
+}
+
+// GCPMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field.
+// It contains GCP-specific status information.
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+type GCPMachineProviderStatus struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// InstanceID is the ID of the instance in GCP.
+	// +optional
+	InstanceID *string `json:"instanceId,omitempty"`
+	// InstanceState is the provisioning state of the GCP Instance.
+	// +optional
+	InstanceState *string `json:"instanceState,omitempty"`
+	// Conditions is a set of conditions associated with the Machine to indicate
+	// errors or other status.
+	// +optional
+	Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+// GCPShieldedInstanceConfig describes the shielded VM configuration of the instance on GCP.
+// Shielded VM configuration allows users to enable and disable Secure Boot, vTPM, and Integrity Monitoring.
+type GCPShieldedInstanceConfig struct {
+	// SecureBoot defines whether the instance should have secure boot enabled.
+	// Secure Boot verifies the digital signature of all boot components and halts the boot process if signature verification fails.
+	// If omitted, the platform chooses a default, which is subject to change over time; currently that default is Disabled.
+	// +kubebuilder:validation:Enum=Enabled;Disabled
+	// +optional
+	SecureBoot SecureBootPolicy `json:"secureBoot,omitempty"`
+
+	// VirtualizedTrustedPlatformModule enables virtualized trusted platform module measurements to create a known good boot integrity policy baseline.
+	// The integrity policy baseline is used for comparison with measurements from subsequent VM boots to determine if anything has changed.
+	// This is required to be set to "Enabled" if IntegrityMonitoring is enabled.
+	// If omitted, the platform chooses a default, which is subject to change over time; currently that default is Enabled.
+	// +kubebuilder:validation:Enum=Enabled;Disabled
+	// +optional
+	VirtualizedTrustedPlatformModule VirtualizedTrustedPlatformModulePolicy `json:"virtualizedTrustedPlatformModule,omitempty"`
+
+	// IntegrityMonitoring determines whether the instance should have integrity monitoring that verifies the runtime boot integrity.
+	// It compares the most recent boot measurements to the integrity policy baseline and returns
+	// a pair of pass/fail results depending on whether they match or not.
+	// If omitted, the platform chooses a default, which is subject to change over time; currently that default is Enabled.
+	// +kubebuilder:validation:Enum=Enabled;Disabled
+	// +optional
+	IntegrityMonitoring IntegrityMonitoringPolicy `json:"integrityMonitoring,omitempty"`
+}
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go
new file mode 100644
index 000000000..3f4b12893
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machine.go
@@ -0,0 +1,388 @@
+package v1beta1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+const (
+	// MachineFinalizer is set on PrepareForCreate callback.
+	MachineFinalizer = "machine.machine.openshift.io"
+
+	// MachineClusterLabelName is the label set on machines linked to a cluster.
+	MachineClusterLabelName = "cluster.k8s.io/cluster-name"
+
+	// MachineClusterIDLabel is the label that a machine must have to identify the
+	// cluster to which it belongs.
+	MachineClusterIDLabel = "machine.openshift.io/cluster-api-cluster"
+
+	// IPClaimProtectionFinalizer is placed on an IPAddressClaim by the machine reconciler
+	// when an IPAddressClaim associated with a machine is created. This finalizer is removed
+	// from the IPAddressClaim when the associated machine is deleted.
+	IPClaimProtectionFinalizer = "machine.openshift.io/ip-claim-protection"
+)
+
+type MachineStatusError string
+
+const (
+	// InvalidConfigurationMachineError represents that the combination of configuration in the MachineSpec
+	// is not supported by this cluster. This is not a transient error, but
+	// indicates a state that must be fixed before progress can be made.
+	//
+	// Example: the ProviderSpec specifies an instance type that doesn't exist.
+	InvalidConfigurationMachineError MachineStatusError = "InvalidConfiguration"
+
+	// UnsupportedChangeMachineError indicates that the MachineSpec has been updated in a way that
+	// is not supported for reconciliation on this cluster. The spec may be
+	// completely valid from a configuration standpoint, but the controller
+	// does not support changing the real world state to match the new
+	// spec.
+	//
+	// Example: the responsible controller is not capable of changing the
+	// container runtime from docker to rkt.
+	UnsupportedChangeMachineError MachineStatusError = "UnsupportedChange"
+
+	// InsufficientResourcesMachineError generally refers to exceeding one's quota in a cloud provider,
+	// or running out of physical machines in an on-premise environment.
+	InsufficientResourcesMachineError MachineStatusError = "InsufficientResources"
+
+	// CreateMachineError indicates that there was an error while trying to create a Node to match this
+	// Machine. This may indicate a transient problem that will be fixed
+	// automatically with time, such as a service outage, or a terminal
+	// error during creation that doesn't match a more specific
+	// MachineStatusError value.
+	//
+	// Example: timeout trying to connect to GCE.
+	CreateMachineError MachineStatusError = "CreateError"
+
+	// UpdateMachineError indicates that there was an error while trying to update a Node that this
+	// Machine represents. This may indicate a transient problem that will be
+	// fixed automatically with time, such as a service outage.
+	//
+	// Example: error updating load balancers.
+	UpdateMachineError MachineStatusError = "UpdateError"
+
+	// DeleteMachineError indicates that an error was encountered while trying to delete the Node that this
+	// Machine represents. This could be a transient or terminal error, but
+	// will only be observable if the provider's Machine controller has
+	// added a finalizer to the object to more gracefully handle deletions.
+	//
+	// Example: cannot resolve EC2 IP address.
+	DeleteMachineError MachineStatusError = "DeleteError"
+
+	// TemplateClonedFromGroupKindAnnotation is the infrastructure machine
+	// annotation that stores the group-kind of the infrastructure template resource
+	// that was cloned for the machine. This annotation is set only during cloning a
+	// template. Older/adopted machines will not have this annotation.
+	TemplateClonedFromGroupKindAnnotation = "machine.openshift.io/cloned-from-groupkind"
+
+	// TemplateClonedFromNameAnnotation is the infrastructure machine annotation that
+	// stores the name of the infrastructure template resource
+	// that was cloned for the machine. This annotation is set only during cloning a
+	// template. Older/adopted machines will not have this annotation.
+	TemplateClonedFromNameAnnotation = "machine.openshift.io/cloned-from-name"
+
+	// JoinClusterTimeoutMachineError indicates that the machine did not join the cluster
+	// as a new node within the expected timeframe after instance
+	// creation at the provider succeeded.
+	//
+	// Example use case: A controller that deletes Machines which do
+	// not result in a Node joining the cluster within a given timeout
+	// and that are managed by a MachineSet.
+	JoinClusterTimeoutMachineError = "JoinClusterTimeoutError"
+
+	// IPAddressInvalidReason is set to indicate that the claimed IP address is not valid.
+	IPAddressInvalidReason MachineStatusError = "IPAddressInvalid"
+)
+
+type ClusterStatusError string
+
+const (
+	// InvalidConfigurationClusterError indicates that the cluster
+	// configuration is invalid.
+	InvalidConfigurationClusterError ClusterStatusError = "InvalidConfiguration"
+
+	// UnsupportedChangeClusterError indicates that the cluster
+	// spec has been updated in an unsupported way that cannot be
+	// reconciled.
+	UnsupportedChangeClusterError ClusterStatusError = "UnsupportedChange"
+
+	// CreateClusterError indicates that an error was encountered
+	// when trying to create the cluster.
+	CreateClusterError ClusterStatusError = "CreateError"
+
+	// UpdateClusterError indicates that an error was encountered
+	// when trying to update the cluster.
+	UpdateClusterError ClusterStatusError = "UpdateError"
+
+	// DeleteClusterError indicates that an error was encountered
+	// when trying to delete the cluster.
+	DeleteClusterError ClusterStatusError = "DeleteError"
+)
+
+type MachineSetStatusError string
+
+const (
+	// InvalidConfigurationMachineSetError represents that the combination of configuration in the MachineTemplateSpec
+	// is not supported by this cluster. This is not a transient error, but
+	// indicates a state that must be fixed before progress can be made.
+	//
+	// Example: the ProviderSpec specifies an instance type that doesn't exist.
+	InvalidConfigurationMachineSetError MachineSetStatusError = "InvalidConfiguration"
+)
+
+type MachineDeploymentStrategyType string
+
+const (
+	// RollingUpdateMachineDeploymentStrategyType replaces the old MachineSet with a new one using a rolling update,
+	// i.e. it gradually scales down the old MachineSet and scales up the new one.
+	RollingUpdateMachineDeploymentStrategyType MachineDeploymentStrategyType = "RollingUpdate"
+)
+
+const (
+	// PhaseFailed indicates a state that will need to be fixed before progress can be made.
+	// Failed machines have encountered a terminal error and must be deleted.
+	// https://github.com/openshift/enhancements/blob/master/enhancements/machine-instance-lifecycle.md
+	// e.g. Instance does NOT exist but Machine has providerID/addresses.
+	// e.g. Cloud service returns a 4xx response.
+	PhaseFailed string = "Failed"
+
+	// PhaseProvisioning indicates the instance does NOT exist.
+	// The machine has NOT been given a providerID or addresses.
+	// Provisioning implies that the Machine API is in the process of creating the instance.
+	PhaseProvisioning string = "Provisioning"
+
+	// PhaseProvisioned indicates the instance exists.
+	// The machine has been given a providerID and addresses.
+	// The machine API successfully provisioned an instance which has not yet joined the cluster;
+	// as such, the machine has NOT yet been given a nodeRef.
+	PhaseProvisioned string = "Provisioned"
+
+	// PhaseRunning indicates the instance exists and the node has joined the cluster.
+	// The machine has been given a providerID, addresses, and a nodeRef.
+	PhaseRunning string = "Running"
+
+	// PhaseDeleting indicates the machine has a deletion timestamp and that the
+	// Machine API is now in the process of removing the machine from the cluster.
+	PhaseDeleting string = "Deleting"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Machine is the Schema for the machines API
+// +k8s:openapi-gen=true
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Phase of machine"
+// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".metadata.labels['machine\\.openshift\\.io/instance-type']",description="Type of instance"
+// +kubebuilder:printcolumn:name="Region",type="string",JSONPath=".metadata.labels['machine\\.openshift\\.io/region']",description="Region associated with machine"
+// +kubebuilder:printcolumn:name="Zone",type="string",JSONPath=".metadata.labels['machine\\.openshift\\.io/zone']",description="Zone associated with machine"
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Machine age"
+// +kubebuilder:printcolumn:name="Node",type="string",JSONPath=".status.nodeRef.name",description="Node associated with machine",priority=1
+// +kubebuilder:printcolumn:name="ProviderID",type="string",JSONPath=".spec.providerID",description="Provider ID of machine created in cloud provider",priority=1
+// +kubebuilder:printcolumn:name="State",type="string",JSONPath=".metadata.annotations['machine\\.openshift\\.io/instance-state']",description="State of instance",priority=1
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+type Machine struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec MachineSpec `json:"spec,omitempty"`
+	Status MachineStatus `json:"status,omitempty"`
+}
+
+// MachineSpec defines the desired state of Machine
+type MachineSpec struct {
+	// ObjectMeta will autopopulate the Node created. Use this to
+	// indicate what labels, annotations, name prefix, etc., should be used
+	// when creating the Node.
+	// +optional
+	ObjectMeta `json:"metadata,omitempty"`
+
+	// LifecycleHooks allow users to pause operations on the machine at
+	// certain predefined points within the machine lifecycle.
+	// +optional
+	LifecycleHooks LifecycleHooks `json:"lifecycleHooks,omitempty"`
+
+	// The list of the taints to be applied to the corresponding Node in additive
+	// manner. This list will not overwrite any other taints added to the Node on
+	// an ongoing basis by other entities. These taints should be actively reconciled
+	// (e.g. if you ask the machine controller to apply a taint and then manually remove
+	// the taint, the machine controller will put it back), but the machine controller
+	// will not remove any taints.
+	// +optional
+	Taints []corev1.Taint `json:"taints,omitempty"`
+
+	// ProviderSpec details Provider-specific configuration to use during node creation.
+	// +optional
+	ProviderSpec ProviderSpec `json:"providerSpec"`
+
+	// ProviderID is the identification ID of the machine provided by the provider.
+	// This field must match the provider ID as seen on the node object corresponding to this machine.
+	// This field is required by higher level consumers of cluster-api. An example use case is cluster autoscaler
+	// with cluster-api as provider.
+	// Clean-up logic in the autoscaler compares machines to nodes to find
+	// machines at the provider which could not get registered as Kubernetes nodes. With cluster-api as a
+	// generic out-of-tree provider for autoscaler, this field is required by autoscaler to be
+	// able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver
+	// and then compared against the list of machines; unregistered machines are found this way and marked for deletion.
+	// This field will be set by the actuators and consumed by higher level entities like autoscaler that will
+	// be interfacing with cluster-api as generic provider.
+	// +optional
+	ProviderID *string `json:"providerID,omitempty"`
+}
+
+// LifecycleHooks allow users to pause operations on the machine at
+// certain predefined points within the machine lifecycle.
+type LifecycleHooks struct {
+	// PreDrain hooks prevent the machine from being drained.
+	// This also blocks further lifecycle events, such as termination.
+	// +listType=map
+	// +listMapKey=name
+	// +optional
+	PreDrain []LifecycleHook `json:"preDrain,omitempty"`
+
+	// PreTerminate hooks prevent the machine from being terminated.
+	// PreTerminate hooks are actioned after the Machine has been drained.
+	// +listType=map
+	// +listMapKey=name
+	// +optional
+	PreTerminate []LifecycleHook `json:"preTerminate,omitempty"`
+}
+
+// LifecycleHook represents a single instance of a lifecycle hook.
+type LifecycleHook struct {
+	// Name defines a unique name for the lifecycle hook.
+	// The name should be unique and descriptive, ideally 1-3 words, in CamelCase, or
+	// it may be namespaced, e.g. foo.example.com/CamelCase.
+	// Names must be unique and should only be managed by a single entity.
+	// +kubebuilder:validation:Pattern=`^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$`
+	// +kubebuilder:validation:MinLength:=3
+	// +kubebuilder:validation:MaxLength:=256
+	// +kubebuilder:validation:Required
+	Name string `json:"name"`
+
+	// Owner defines the owner of the lifecycle hook.
+	// This should be descriptive enough so that users can identify
+	// who/what is responsible for blocking the lifecycle.
+	// This could be the name of a controller (e.g. clusteroperator/etcd)
+	// or an administrator managing the hook.
+	// +kubebuilder:validation:MinLength:=3
+	// +kubebuilder:validation:MaxLength:=512
+	// +kubebuilder:validation:Required
+	Owner string `json:"owner"`
+}
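A brief, non-normative sketch of a pre-drain lifecycle hook; the hook name and owner follow the conventions documented above but are illustrative:

    package main

    import (
        "fmt"

        machinev1beta1 "github.com/openshift/api/machine/v1beta1"
    )

    func main() {
        // Until the owning controller removes this hook, the machine
        // controller will not drain (and therefore not terminate) the Machine.
        hooks := machinev1beta1.LifecycleHooks{
            PreDrain: []machinev1beta1.LifecycleHook{{
                Name:  "EtcdQuorumOperator",
                Owner: "clusteroperator/etcd",
            }},
        }
        fmt.Printf("%+v\n", hooks)
    }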
+
+// MachineStatus defines the observed state of Machine
+type MachineStatus struct {
+	// NodeRef will point to the corresponding Node if it exists.
+	// +optional
+	NodeRef *corev1.ObjectReference `json:"nodeRef,omitempty"`
+
+	// LastUpdated identifies when this status was last observed.
+	// +optional
+	LastUpdated *metav1.Time `json:"lastUpdated,omitempty"`
+
+	// ErrorReason will be set in the event that there is a terminal problem
+	// reconciling the Machine and will contain a succinct value suitable
+	// for machine interpretation.
+	//
+	// This field should not be set for transient errors that a controller
+	// faces that are expected to be fixed automatically over
+	// time (like service outages), but instead indicate that something is
+	// fundamentally wrong with the Machine's spec or the configuration of
+	// the controller, and that manual intervention is required. Examples
+	// of terminal errors would be invalid combinations of settings in the
+	// spec, values that are unsupported by the controller, or the
+	// responsible controller itself being critically misconfigured.
+	//
+	// Any transient errors that occur during the reconciliation of Machines
+	// can be added as events to the Machine object and/or logged in the
+	// controller's output.
+	// +optional
+	ErrorReason *MachineStatusError `json:"errorReason,omitempty"`
+
+	// ErrorMessage will be set in the event that there is a terminal problem
+	// reconciling the Machine and will contain a more verbose string suitable
+	// for logging and human consumption.
+	//
+	// This field should not be set for transient errors that a controller
+	// faces that are expected to be fixed automatically over
+	// time (like service outages), but instead indicate that something is
+	// fundamentally wrong with the Machine's spec or the configuration of
+	// the controller, and that manual intervention is required. Examples
+	// of terminal errors would be invalid combinations of settings in the
+	// spec, values that are unsupported by the controller, or the
+	// responsible controller itself being critically misconfigured.
+	//
+	// Any transient errors that occur during the reconciliation of Machines
+	// can be added as events to the Machine object and/or logged in the
+	// controller's output.
+	// +optional
+	ErrorMessage *string `json:"errorMessage,omitempty"`
+
+	// ProviderStatus details a Provider-specific status.
+	// It is recommended that providers maintain their
+	// own versioned API types that should be
+	// serialized/deserialized from this field.
+	// +optional
+	// +kubebuilder:validation:XPreserveUnknownFields
+	ProviderStatus *runtime.RawExtension `json:"providerStatus,omitempty"`
+
+	// Addresses is a list of addresses assigned to the machine. Queried from cloud provider, if available.
+	// +optional
+	Addresses []corev1.NodeAddress `json:"addresses,omitempty"`
+
+	// LastOperation describes the last operation performed by the machine controller.
+	// This API should be useful as a history in terms of the latest operation performed on the
+	// specific machine. It should also convey the state of the latest operation, for example whether
+	// it is still ongoing, has failed, or has completed successfully.
+	// +optional
+	LastOperation *LastOperation `json:"lastOperation,omitempty"`
+
+	// Phase represents the current phase of machine actuation.
+	// One of: Failed, Provisioning, Provisioned, Running, Deleting
+	// +optional
+	Phase *string `json:"phase,omitempty"`
+
+	// Conditions defines the current state of the Machine.
+	Conditions Conditions `json:"conditions,omitempty"`
+}
+
+// LastOperation represents the detail of the last performed operation on the MachineObject.
+type LastOperation struct {
+	// Description is the human-readable description of the last operation.
+	Description *string `json:"description,omitempty"`
+
+	// LastUpdated is the timestamp at which the LastOperation API was last updated.
+	LastUpdated *metav1.Time `json:"lastUpdated,omitempty"`
+
+	// State is the current status of the last performed operation,
+	// e.g. Processing, Failed, Successful, etc.
+	State *string `json:"state,omitempty"`
+
+	// Type is the type of operation which was last performed,
+	// e.g. Create, Delete, Update, etc.
+	Type *string `json:"type,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// MachineList contains a list of Machine
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+type MachineList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata,omitempty"`
+
+	Items []Machine `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go
new file mode 100644
index 000000000..1ad80fe25
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machinehealthcheck.go
@@ -0,0 +1,142 @@
+package v1beta1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
+)
+
+// RemediationStrategyType contains the remediation strategy type.
+type RemediationStrategyType string
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// MachineHealthCheck is the Schema for the machinehealthchecks API
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:shortName=mhc;mhcs
+// +k8s:openapi-gen=true
+// +kubebuilder:printcolumn:name="MaxUnhealthy",type="string",JSONPath=".spec.maxUnhealthy",description="Maximum number of unhealthy machines allowed"
+// +kubebuilder:printcolumn:name="ExpectedMachines",type="integer",JSONPath=".status.expectedMachines",description="Number of machines currently monitored"
+// +kubebuilder:printcolumn:name="CurrentHealthy",type="integer",JSONPath=".status.currentHealthy",description="Current observed healthy machines"
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+type MachineHealthCheck struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// Specification of machine health check policy
+	// +optional
+	Spec MachineHealthCheckSpec `json:"spec,omitempty"`
+
+	// Most recently observed status of MachineHealthCheck resource
+	// +optional
+	Status MachineHealthCheckStatus `json:"status,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// MachineHealthCheckList contains a list of MachineHealthCheck
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+type MachineHealthCheckList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata,omitempty"`
+
+	Items []MachineHealthCheck `json:"items"`
+}
+
+// MachineHealthCheckSpec defines the desired state of MachineHealthCheck
+type MachineHealthCheckSpec struct {
+	// Label selector to match machines whose health will be exercised.
+	// Note: An empty selector will match all machines.
+	Selector metav1.LabelSelector `json:"selector"`
+
+	// UnhealthyConditions contains a list of the conditions that determine
+	// whether a node is considered unhealthy. The conditions are combined in a
+	// logical OR, i.e. if any of the conditions is met, the node is unhealthy.
+	//
+	// +kubebuilder:validation:MinItems=1
+	UnhealthyConditions []UnhealthyCondition `json:"unhealthyConditions"`
+
+	// Any further remediation is only allowed if at most "MaxUnhealthy" machines selected by
+	// "selector" are not healthy.
+	// Expects either a positive integer value or a percentage value.
+	// Percentage values must be positive whole numbers and are capped at 100%.
+	// Both 0 and 0% are valid and will block all remediation.
+	// +kubebuilder:default:="100%"
+	// +kubebuilder:validation:XIntOrString
+	// +kubebuilder:validation:Pattern="^((100|[0-9]{1,2})%|[0-9]+)$"
+	// +optional
+	MaxUnhealthy *intstr.IntOrString `json:"maxUnhealthy,omitempty"`
+
+	// Machines older than this duration without a node will be considered to have
+	// failed and will be remediated.
+	// To prevent Machines without Nodes from being removed, disable startup checks
+	// by setting this value explicitly to "0".
+	// Expects an unsigned duration string of decimal numbers each with optional
+	// fraction and a unit suffix, eg "300ms", "1.5h" or "2h45m".
+	// Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h".
+	// +kubebuilder:default:="10m"
+	// +kubebuilder:validation:Pattern="^0|([0-9]+(\\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$"
+	// +kubebuilder:validation:Type:=string
+	// +optional
+	NodeStartupTimeout *metav1.Duration `json:"nodeStartupTimeout,omitempty"`
+
+	// RemediationTemplate is a reference to a remediation template
+	// provided by an infrastructure provider.
+	//
+	// This field is completely optional; when filled, the MachineHealthCheck controller
+	// creates a new object from the template referenced and hands off remediation of the machine to
+	// a controller that lives outside of the Machine API Operator.
+	// +optional
+	RemediationTemplate *corev1.ObjectReference `json:"remediationTemplate,omitempty"`
+}
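Tying the spec fields together, a hedged sketch of a health check that remediates machines whose Node reports Ready=False or Ready=Unknown for five minutes, capped at 40% unhealthy (the label selector value is illustrative; UnhealthyCondition is defined just below):

    package main

    import (
        "fmt"
        "time"

        machinev1beta1 "github.com/openshift/api/machine/v1beta1"
        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/intstr"
    )

    func main() {
        maxUnhealthy := intstr.FromString("40%")
        spec := machinev1beta1.MachineHealthCheckSpec{
            Selector: metav1.LabelSelector{
                MatchLabels: map[string]string{
                    "machine.openshift.io/cluster-api-machine-role": "worker",
                },
            },
            UnhealthyConditions: []machinev1beta1.UnhealthyCondition{
                {Type: corev1.NodeReady, Status: corev1.ConditionFalse, Timeout: metav1.Duration{Duration: 5 * time.Minute}},
                {Type: corev1.NodeReady, Status: corev1.ConditionUnknown, Timeout: metav1.Duration{Duration: 5 * time.Minute}},
            },
            MaxUnhealthy:       &maxUnhealthy,
            NodeStartupTimeout: &metav1.Duration{Duration: 10 * time.Minute},
        }
        fmt.Printf("%+v\n", spec)
    }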
+	// +kubebuilder:validation:Pattern="^([0-9]+(\\.[0-9]+)?(ns|us|µs|ms|s|m|h))+$"
+	// +kubebuilder:validation:Type:=string
+	Timeout metav1.Duration `json:"timeout"`
+}
+
+// MachineHealthCheckStatus defines the observed state of MachineHealthCheck
+type MachineHealthCheckStatus struct {
+	// total number of machines counted by this machine health check
+	// +kubebuilder:validation:Minimum=0
+	ExpectedMachines *int `json:"expectedMachines"`
+
+	// total number of healthy machines counted by this machine health check
+	// +kubebuilder:validation:Minimum=0
+	CurrentHealthy *int `json:"currentHealthy"`
+
+	// RemediationsAllowed is the number of further remediations allowed by this machine health check before
+	// maxUnhealthy short-circuiting will be applied
+	// +kubebuilder:validation:Minimum=0
+	// +optional
+	RemediationsAllowed int32 `json:"remediationsAllowed"`
+
+	// Conditions defines the current state of the MachineHealthCheck
+	// +optional
+	Conditions Conditions `json:"conditions,omitempty"`
+}
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go b/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go
new file mode 100644
index 000000000..fb5afebc1
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1beta1/types_machineset.go
@@ -0,0 +1,145 @@
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// MachineSet ensures that a specified number of machine replicas are running at any given time.
+// +k8s:openapi-gen=true
+// +kubebuilder:subresource:status
+// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.labelSelector
+// +kubebuilder:printcolumn:name="Desired",type="integer",JSONPath=".spec.replicas",description="Desired Replicas"
+// +kubebuilder:printcolumn:name="Current",type="integer",JSONPath=".status.replicas",description="Current Replicas"
+// +kubebuilder:printcolumn:name="Ready",type="integer",JSONPath=".status.readyReplicas",description="Ready Replicas"
+// +kubebuilder:printcolumn:name="Available",type="string",JSONPath=".status.availableReplicas",description="Observed number of available replicas"
+// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="MachineSet age"
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+type MachineSet struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   MachineSetSpec   `json:"spec,omitempty"`
+	Status MachineSetStatus `json:"status,omitempty"`
+}
+
+// MachineSetSpec defines the desired state of MachineSet
+type MachineSetSpec struct {
+	// Replicas is the number of desired replicas.
+	// This is a pointer to distinguish between explicit zero and unspecified.
+	// Defaults to 1.
+	// +kubebuilder:default=1
+	Replicas *int32 `json:"replicas,omitempty"`
+	// MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready.
+	// Defaults to 0 (machine will be considered available as soon as it is ready)
+	// +optional
+	MinReadySeconds int32 `json:"minReadySeconds,omitempty"`
+	// DeletePolicy defines the policy used to identify nodes to delete when downscaling.
+	// Defaults to "Random". Valid values are "Random", "Newest", "Oldest".
+	// +kubebuilder:validation:Enum=Random;Newest;Oldest
+	DeletePolicy string `json:"deletePolicy,omitempty"`
+	// Selector is a label query over machines that should match the replica count.
+	// Label keys and values that must match in order to be controlled by this MachineSet.
+	// It must match the machine template's labels.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+	Selector metav1.LabelSelector `json:"selector"`
+	// Template is the object that describes the machine that will be created if
+	// insufficient replicas are detected.
+	// +optional
+	Template MachineTemplateSpec `json:"template,omitempty"`
+}
+
+// MachineSetDeletePolicy defines how priority is assigned to nodes to delete when
+// downscaling a MachineSet. Defaults to "Random".
+type MachineSetDeletePolicy string
+
+const (
+	// RandomMachineSetDeletePolicy prioritizes both Machines that have the annotation
+	// "cluster.k8s.io/delete-machine=yes" and Machines that are unhealthy
+	// (Status.ErrorReason or Status.ErrorMessage are set to a non-empty value).
+	// Finally, it picks Machines at random to delete.
+	RandomMachineSetDeletePolicy MachineSetDeletePolicy = "Random"
+	// NewestMachineSetDeletePolicy prioritizes both Machines that have the annotation
+	// "cluster.k8s.io/delete-machine=yes" and Machines that are unhealthy
+	// (Status.ErrorReason or Status.ErrorMessage are set to a non-empty value).
+	// It then prioritizes the newest Machines for deletion based on the Machine's CreationTimestamp.
+	NewestMachineSetDeletePolicy MachineSetDeletePolicy = "Newest"
+	// OldestMachineSetDeletePolicy prioritizes both Machines that have the annotation
+	// "cluster.k8s.io/delete-machine=yes" and Machines that are unhealthy
+	// (Status.ErrorReason or Status.ErrorMessage are set to a non-empty value).
+	// It then prioritizes the oldest Machines for deletion based on the Machine's CreationTimestamp.
+	OldestMachineSetDeletePolicy MachineSetDeletePolicy = "Oldest"
+)
+
+// MachineTemplateSpec describes the data needed to create a Machine from a template
+type MachineTemplateSpec struct {
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	ObjectMeta `json:"metadata,omitempty"`
+	// Specification of the desired behavior of the machine.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+	// +optional
+	Spec MachineSpec `json:"spec,omitempty"`
+}
+
+// MachineSetStatus defines the observed state of MachineSet
+type MachineSetStatus struct {
+	// Replicas is the most recently observed number of replicas.
+	Replicas int32 `json:"replicas"`
+	// The number of replicas that have labels matching the labels of the machine template of the MachineSet.
+	// +optional
+	FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty"`
+	// The number of ready replicas for this MachineSet. A machine is considered ready when the node has been created and is "Ready".
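+	//
+	// For illustration, a minimal sketch (editorial; the helper name and signature
+	// are assumptions) of the ready-versus-available distinction: a ready machine
+	// counts as available only once it has stayed ready for minReadySeconds.
+	//
+	//	func isAvailable(readySince time.Time, minReadySeconds int32, now time.Time) bool {
+	//		return !readySince.IsZero() &&
+	//			now.Sub(readySince) >= time.Duration(minReadySeconds)*time.Second
+	//	}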
+	// +optional
+	ReadyReplicas int32 `json:"readyReplicas,omitempty"`
+	// The number of available replicas (ready for at least minReadySeconds) for this MachineSet.
+	// +optional
+	AvailableReplicas int32 `json:"availableReplicas,omitempty"`
+	// ObservedGeneration reflects the generation of the most recently observed MachineSet.
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty"`
+	// In the event that there is a terminal problem reconciling the
+	// replicas, both ErrorReason and ErrorMessage will be set. ErrorReason
+	// will be populated with a succinct value suitable for machine
+	// interpretation, while ErrorMessage will contain a more verbose
+	// string suitable for logging and human consumption.
+	//
+	// These fields should not be set for transient errors that a
+	// controller faces that are expected to be fixed automatically over
+	// time (like service outages), but instead indicate that something is
+	// fundamentally wrong with the MachineTemplate's spec or the configuration of
+	// the machine controller, and that manual intervention is required. Examples
+	// of terminal errors would be invalid combinations of settings in the
+	// spec, values that are unsupported by the machine controller, or the
+	// responsible machine controller itself being critically misconfigured.
+	//
+	// Any transient errors that occur during the reconciliation of Machines
+	// can be added as events to the MachineSet object and/or logged in the
+	// controller's output.
+	// +optional
+	ErrorReason *MachineSetStatusError `json:"errorReason,omitempty"`
+	// +optional
+	ErrorMessage *string `json:"errorMessage,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// MachineSetList contains a list of MachineSet
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+type MachineSetList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata,omitempty"`
+
+	Items []MachineSet `json:"items"`
+}
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_provider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_provider.go
new file mode 100644
index 000000000..2d9bc4853
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1beta1/types_provider.go
@@ -0,0 +1,227 @@
+package v1beta1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// ProviderSpec defines the configuration to use during node creation.
+type ProviderSpec struct {
+
+	// No more than one of the following may be specified.
+
+	// Value is an inlined, serialized representation of the resource
+	// configuration. It is recommended that providers maintain their own
+	// versioned API types that should be serialized/deserialized from this
+	// field, akin to component config.
+	// +optional
+	// +kubebuilder:validation:XPreserveUnknownFields
+	Value *runtime.RawExtension `json:"value,omitempty"`
+}
+
+// ObjectMeta is metadata that all persisted resources must have, which includes all objects
+// users must create. This is a copy of customizable fields from metav1.ObjectMeta.
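+//
+// Referring back to the ProviderSpec type above: a minimal sketch (editorial;
+// the template name is an assumption) of inlining a provider-specific
+// configuration into ProviderSpec.Value as raw JSON:
+//
+//	raw, err := json.Marshal(&VSphereMachineProviderSpec{Template: "rhcos-template"})
+//	if err != nil {
+//		// handle the marshalling error
+//	}
+//	ps := ProviderSpec{Value: &runtime.RawExtension{Raw: raw}}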
+//
+// ObjectMeta is embedded in `Machine.Spec`, `MachineDeployment.Template` and `MachineSet.Template`,
+// which are not top-level Kubernetes objects. Given that metav1.ObjectMeta has lots of special cases
+// and read-only fields which end up in the generated CRD validation, having it as a subset simplifies
+// the API and some issues that can impact user experience.
+//
+// During the [upgrade to controller-tools@v2](https://github.com/kubernetes-sigs/cluster-api/pull/1054)
+// for v1alpha2, we noticed a failure would occur running the Cluster API test suite against the new CRDs,
+// specifically `spec.metadata.creationTimestamp in body must be of type string: "null"`.
+// The investigation showed that `controller-tools@v2` behaves differently than its previous version
+// when handling types from the [metav1](k8s.io/apimachinery/pkg/apis/meta/v1) package.
+//
+// In more detail, we found that embedded (non-top-level) types that embedded `metav1.ObjectMeta`
+// had validation properties, including for `creationTimestamp` (metav1.Time).
+// The `metav1.Time` type specifies a custom json marshaller that, when IsZero() is true, returns `null`,
+// which breaks validation because the field isn't marked as nullable.
+//
+// In future versions, controller-tools@v2 might allow overriding the type and validation for embedded
+// types. When that happens, this hack should be revisited.
+type ObjectMeta struct {
+	// Name must be unique within a namespace. It is required when creating resources, although
+	// some resources may allow a client to request the generation of an appropriate name
+	// automatically. Name is primarily intended for creation idempotence and configuration
+	// definition.
+	// Cannot be updated.
+	// More info: http://kubernetes.io/docs/user-guide/identifiers#names
+	// +optional
+	Name string `json:"name,omitempty"`
+
+	// GenerateName is an optional prefix, used by the server, to generate a unique
+	// name ONLY IF the Name field has not been provided.
+	// If this field is used, the name returned to the client will be different
+	// from the name passed. This value will also be combined with a unique suffix.
+	// The provided value has the same validation rules as the Name field,
+	// and may be truncated by the length of the suffix required to make the value
+	// unique on the server.
+	//
+	// If this field is specified and the generated name exists, the server will
+	// NOT return a 409 - instead, it will either return 201 Created or 500 with Reason
+	// ServerTimeout indicating a unique name could not be found in the time allotted, and the client
+	// should retry (optionally after the time indicated in the Retry-After header).
+	//
+	// Applied only if Name is not specified.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency
+	// +optional
+	GenerateName string `json:"generateName,omitempty"`
+
+	// Namespace defines the space within which each name must be unique. An empty namespace is
+	// equivalent to the "default" namespace, but "default" is the canonical representation.
+	// Not all objects are required to be scoped to a namespace - the value of this field for
+	// those objects will be empty.
+	//
+	// Must be a DNS_LABEL.
+	// Cannot be updated.
+	// More info: http://kubernetes.io/docs/user-guide/namespaces
+	// +optional
+	Namespace string `json:"namespace,omitempty"`
+
+	// Map of string keys and values that can be used to organize and categorize
+	// (scope and select) objects.
May match selectors of replication controllers
+	// and services.
+	// More info: http://kubernetes.io/docs/user-guide/labels
+	// +optional
+	Labels map[string]string `json:"labels,omitempty"`
+
+	// Annotations is an unstructured key value map stored with a resource that may be
+	// set by external tools to store and retrieve arbitrary metadata. They are not
+	// queryable and should be preserved when modifying objects.
+	// More info: http://kubernetes.io/docs/user-guide/annotations
+	// +optional
+	Annotations map[string]string `json:"annotations,omitempty"`
+
+	// List of objects depended on by this object. If ALL objects in the list have
+	// been deleted, this object will be garbage collected. If this object is managed by a controller,
+	// then an entry in this list will point to this controller, with the controller field set to true.
+	// There cannot be more than one managing controller.
+	// +optional
+	// +patchMergeKey=uid
+	// +patchStrategy=merge
+	OwnerReferences []metav1.OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid"`
+}
+
+// ConditionSeverity expresses the severity of a Condition Type failing.
+type ConditionSeverity string
+
+const (
+	// ConditionSeverityError specifies that a condition with `Status=False` is an error.
+	ConditionSeverityError ConditionSeverity = "Error"
+
+	// ConditionSeverityWarning specifies that a condition with `Status=False` is a warning.
+	ConditionSeverityWarning ConditionSeverity = "Warning"
+
+	// ConditionSeverityInfo specifies that a condition with `Status=False` is informative.
+	ConditionSeverityInfo ConditionSeverity = "Info"
+
+	// ConditionSeverityNone should apply only to conditions with `Status=True`.
+	ConditionSeverityNone ConditionSeverity = ""
+)
+
+// ConditionType is a valid value for Condition.Type.
+type ConditionType string
+
+// Valid conditions for a machine.
+const (
+	// MachineCreation indicates whether the machine has been created or not. If not,
+	// it should include a reason and message for the failure.
+	// NOTE: MachineCreation is here for historical reasons; MachineCreated should be used instead.
+	MachineCreation ConditionType = "MachineCreation"
+	// MachineCreated indicates whether the machine has been created or not. If not,
+	// it should include a reason and message for the failure.
+	MachineCreated ConditionType = "MachineCreated"
+	// InstanceExistsCondition is set on the Machine to show whether a virtual machine has been created by the cloud provider.
+	InstanceExistsCondition ConditionType = "InstanceExists"
+	// RemediationAllowedCondition is set on MachineHealthChecks to show the status of whether the MachineHealthCheck is
+	// allowed to remediate any Machines or whether it is blocked from remediating any further.
+	RemediationAllowedCondition ConditionType = "RemediationAllowed"
+	// ExternalRemediationTemplateAvailable is set on machinehealthchecks when the MachineHealthCheck controller uses external remediation.
+	// ExternalRemediationTemplateAvailable is set to false if the external remediation template is not found.
+	ExternalRemediationTemplateAvailable ConditionType = "ExternalRemediationTemplateAvailable"
+	// ExternalRemediationRequestAvailable is set on machinehealthchecks when the MachineHealthCheck controller uses external remediation.
+	// ExternalRemediationRequestAvailable is set to false if creating the external remediation request fails.
+	ExternalRemediationRequestAvailable ConditionType = "ExternalRemediationRequestAvailable"
+	// MachineDrained is set on a machine to indicate that the machine has been drained. When an error occurs during
+	// the drain process, the condition will be added with a false status and details of the error.
+	MachineDrained ConditionType = "Drained"
+	// MachineDrainable is set on a machine to indicate whether the machine can be drained, or whether some
+	// deletion hook is blocking the drain operation.
+	MachineDrainable ConditionType = "Drainable"
+	// MachineTerminable is set on a machine to indicate whether the machine can be terminated, or whether some
+	// deletion hook is blocking the termination operation.
+	MachineTerminable ConditionType = "Terminable"
+	// IPAddressClaimedCondition is set to indicate that a machine has claimed an IP address.
+	IPAddressClaimedCondition ConditionType = "IPAddressClaimed"
+)
+
+const (
+	// MachineCreationSucceeded indicates machine creation success.
+	MachineCreationSucceededConditionReason string = "MachineCreationSucceeded"
+	// MachineCreationFailed indicates machine creation failure.
+	MachineCreationFailedConditionReason string = "MachineCreationFailed"
+	// ErrorCheckingProviderReason is the reason used when the existence check fails.
+	// This would normally be because we cannot contact the provider.
+	ErrorCheckingProviderReason = "ErrorCheckingProvider"
+	// InstanceMissingReason is the reason used when the machine was provisioned, but the instance has gone missing.
+	InstanceMissingReason = "InstanceMissing"
+	// InstanceNotCreatedReason is the reason used when the machine has not yet been provisioned.
+	InstanceNotCreatedReason = "InstanceNotCreated"
+	// TooManyUnhealthyReason is the reason used when too many Machines are unhealthy and the MachineHealthCheck is blocked
+	// from making any further remediations.
+	TooManyUnhealthyReason = "TooManyUnhealthy"
+	// ExternalRemediationTemplateNotFound is the reason used when a machine health check fails to find the external remediation template.
+	ExternalRemediationTemplateNotFound = "ExternalRemediationTemplateNotFound"
+	// ExternalRemediationRequestCreationFailed is the reason used when a machine health check fails to create an external remediation request.
+	ExternalRemediationRequestCreationFailed = "ExternalRemediationRequestCreationFailed"
+	// MachineHookPresent indicates that a machine lifecycle hook is blocking part of the lifecycle of the machine.
+	// This should be used with the `Drainable` and `Terminable` machine condition types.
+	MachineHookPresent = "HookPresent"
+	// MachineDrainError indicates an error occurred when draining the machine.
+	// This should be used with the `Drained` condition type.
+	MachineDrainError = "DrainError"
+	// WaitingForIPAddressReason is set to indicate that a machine is
+	// currently waiting for an IP address to be provisioned.
+	WaitingForIPAddressReason string = "WaitingForIPAddress"
+)
+
+// Condition defines an observation of a Machine API resource operational state.
+type Condition struct {
+	// Type of condition in CamelCase or in foo.example.com/CamelCase.
+	// Many .condition.type values are consistent across resources like Available, but because arbitrary conditions
+	// can be useful (see .node.status.conditions), the ability to deconflict is important.
+	// +required
+	Type ConditionType `json:"type"`
+
+	// Status of the condition, one of True, False, Unknown.
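+	//
+	// For illustration, a minimal sketch (editorial; the message text is an
+	// assumption) of a condition recording that remediation is blocked:
+	//
+	//	c := Condition{
+	//		Type:               RemediationAllowedCondition,
+	//		Status:             corev1.ConditionFalse,
+	//		Severity:           ConditionSeverityWarning,
+	//		Reason:             TooManyUnhealthyReason,
+	//		Message:            "too many unhealthy machines; remediation short-circuited",
+	//		LastTransitionTime: metav1.Now(),
+	//	}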
+	// +required
+	Status corev1.ConditionStatus `json:"status"`
+
+	// Severity provides an explicit classification of the Reason code, so the users or machines can immediately
+	// understand the current situation and act accordingly.
+	// The Severity field MUST be set only when Status=False.
+	// +optional
+	Severity ConditionSeverity `json:"severity,omitempty"`
+
+	// Last time the condition transitioned from one status to another.
+	// This should be when the underlying condition changed. If that is not known, then using the time when
+	// the API field changed is acceptable.
+	// +required
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
+
+	// The reason for the condition's last transition in CamelCase.
+	// The specific API may choose whether or not this field is considered a guaranteed API.
+	// This field may not be empty.
+	// +optional
+	Reason string `json:"reason,omitempty"`
+
+	// A human-readable message indicating details about the transition.
+	// This field may be empty.
+	// +optional
+	Message string `json:"message,omitempty"`
+}
+
+// Conditions provide observations of the operational state of a Machine API resource.
+type Conditions []Condition
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/types_vsphereprovider.go b/vendor/github.com/openshift/api/machine/v1beta1/types_vsphereprovider.go
new file mode 100644
index 000000000..27245c8a4
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1beta1/types_vsphereprovider.go
@@ -0,0 +1,210 @@
+package v1beta1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// VSphereMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field
+// for a VSphere virtual machine. It is used by the vSphere machine actuator to create a single Machine.
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type VSphereMachineProviderSpec struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+	// UserDataSecret contains a local reference to a secret that contains the
+	// UserData to apply to the instance
+	// +optional
+	UserDataSecret *corev1.LocalObjectReference `json:"userDataSecret,omitempty"`
+	// CredentialsSecret is a reference to the secret with vSphere credentials.
+	// +optional
+	CredentialsSecret *corev1.LocalObjectReference `json:"credentialsSecret,omitempty"`
+	// Template is the name, inventory path, or instance UUID of the template
+	// used to clone new machines.
+	Template string `json:"template"`
+	// Workspace describes the workspace to use for the machine.
+	// +optional
+	Workspace *Workspace `json:"workspace,omitempty"`
+	// Network is the network configuration for this machine's VM.
+	Network NetworkSpec `json:"network"`
+	// NumCPUs is the number of virtual processors in a virtual machine.
+	// Defaults to the analogous property value in the template from which this
+	// machine is cloned.
+	// +optional
+	NumCPUs int32 `json:"numCPUs,omitempty"`
+	// NumCoresPerSocket is the number of cores among which to distribute CPUs in this
+	// virtual machine.
+	// Defaults to the analogous property value in the template from which this
+	// machine is cloned.
+	// +optional
+	NumCoresPerSocket int32 `json:"numCoresPerSocket,omitempty"`
+	// MemoryMiB is the size of a virtual machine's memory, in MiB.
+	// Defaults to the analogous property value in the template from which this
+	// machine is cloned.
+	// +optional
+	MemoryMiB int64 `json:"memoryMiB,omitempty"`
+	// DiskGiB is the size of a virtual machine's disk, in GiB.
+	// Defaults to the analogous property value in the template from which this
+	// machine is cloned.
+	// This parameter will be ignored if the 'LinkedClone' CloneMode is set.
+	// +optional
+	DiskGiB int32 `json:"diskGiB,omitempty"`
+	// Snapshot is the name of the snapshot from which the VM was cloned.
+	// +optional
+	Snapshot string `json:"snapshot"`
+	// CloneMode specifies the type of clone operation.
+	// The LinkedClone mode is only supported for templates that have at least
+	// one snapshot. If the template has no snapshots, then CloneMode defaults
+	// to FullClone.
+	// When LinkedClone mode is enabled the DiskGiB field is ignored, as it is
+	// not possible to expand disks of linked clones.
+	// Defaults to FullClone.
+	// When using LinkedClone, if no snapshots exist for the source template, the mode falls back to FullClone.
+	// +optional
+	CloneMode CloneMode `json:"cloneMode,omitempty"`
+}
+
+// CloneMode is the type of clone operation used to clone a VM from a template.
+type CloneMode string
+
+const (
+	// FullClone indicates a VM will have no relationship to the source of the
+	// clone operation once the operation is complete. This is the safest clone
+	// mode, but it is not the fastest.
+	FullClone CloneMode = "fullClone"
+	// LinkedClone means resulting VMs will be dependent upon the snapshot of
+	// the source VM/template from which the VM was cloned. This is the fastest
+	// clone mode, but it also prevents expanding a VM's disk beyond the size of
+	// the source VM/template.
+	LinkedClone CloneMode = "linkedClone"
+)
+
+// NetworkSpec defines the virtual machine's network configuration.
+type NetworkSpec struct {
+	// Devices defines the virtual machine's network interfaces.
+	Devices []NetworkDeviceSpec `json:"devices"`
+}
+
+// AddressesFromPool is an IPAddressPool that will be used to create
+// IPAddressClaims for fulfillment by an external controller.
+type AddressesFromPool struct {
+	// group of the IP address pool type known to an external IPAM controller.
+	// This should be a fully qualified domain name, for example, externalipam.controller.io.
+	// +kubebuilder:example=externalipam.controller.io
+	// +kubebuilder:validation:Pattern:="^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$"
+	// +kubebuilder:validation:Required
+	Group string `json:"group"`
+	// resource of the IP address pool type known to an external IPAM controller.
+	// It is normally the plural form of the resource kind in lowercase, for example,
+	// ippools.
+	// +kubebuilder:example=ippools
+	// +kubebuilder:validation:Pattern:="^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
+	// +kubebuilder:validation:Required
+	Resource string `json:"resource"`
+	// name of an IP address pool, for example, pool-config-1.
+	// +kubebuilder:example=pool-config-1
+	// +kubebuilder:validation:Pattern:="^[a-z0-9]([-a-z0-9]*[a-z0-9])?$"
+	// +kubebuilder:validation:Required
+	Name string `json:"name"`
+}
+
+// NetworkDeviceSpec defines the network configuration for a virtual machine's
+// network device.
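+//
+// For illustration, a minimal sketch (editorial; the port group and addresses
+// reuse the examples from the field documentation below): a statically
+// addressed device. DHCP is simply a device with neither ipAddrs nor
+// addressesFromPools set.
+//
+//	dev := NetworkDeviceSpec{
+//		NetworkName: "port-group-1",
+//		IPAddrs:     []string{"192.168.1.100/24"},
+//		Gateway:     "192.168.1.1",
+//		Nameservers: []string{"8.8.8.8"},
+//	}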
+type NetworkDeviceSpec struct {
+	// networkName is the name of the vSphere network or port group to which the network
+	// device will be connected, for example, port-group-1. When not provided, the vCenter
+	// API will attempt to select a default network.
+	// The available networks (port groups) can be listed using `govc ls 'network/*'`.
+	// +kubebuilder:example=port-group-1
+	// +kubebuilder:validation:MaxLength=80
+	// +optional
+	NetworkName string `json:"networkName,omitempty"`
+
+	// gateway is an IPv4 or IPv6 address which represents the subnet gateway,
+	// for example, 192.168.1.1.
+	// +kubebuilder:validation:Format=ipv4
+	// +kubebuilder:validation:Format=ipv6
+	// +kubebuilder:example=192.168.1.1
+	// +kubebuilder:example=2001:DB8:0000:0000:244:17FF:FEB6:D37D
+	// +optional
+	Gateway string `json:"gateway,omitempty"`
+
+	// ipAddrs is a list of one or more IPv4 and/or IPv6 addresses, in CIDR notation, to assign to
+	// this device, for example, 192.168.1.100/24. IP addresses provided via ipAddrs are
+	// intended to allow explicit assignment of a machine's IP address. IP pool configurations
+	// provided via addressesFromPool, however, defer IP address assignment to an external controller.
+	// If both addressesFromPool and ipAddrs are empty or not defined, DHCP will be used to assign
+	// an IP address. If both ipAddrs and addressesFromPools are defined, the IP addresses associated with
+	// ipAddrs will be applied first, followed by IP addresses from addressesFromPools.
+	// +kubebuilder:validation:Format=ipv4
+	// +kubebuilder:validation:Format=ipv6
+	// +kubebuilder:example=192.168.1.100/24
+	// +kubebuilder:example=2001:DB8:0000:0000:244:17FF:FEB6:D37D/64
+	// +optional
+	IPAddrs []string `json:"ipAddrs,omitempty"`
+
+	// nameservers is a list of IPv4 and/or IPv6 addresses used as DNS nameservers, for example,
+	// 8.8.8.8. A nameserver is not provided by a fulfilled IPAddressClaim. If DHCP is not the
+	// source of IP addresses for this network device, nameservers should include a valid nameserver.
+	// +kubebuilder:validation:Format=ipv4
+	// +kubebuilder:validation:Format=ipv6
+	// +kubebuilder:example=8.8.8.8
+	// +optional
+	Nameservers []string `json:"nameservers,omitempty"`
+
+	// addressesFromPools is a list of references to IP pool types and instances which are handled
+	// by an external controller. addressesFromPool configurations provided via addressesFromPools
+	// defer IP address assignment to an external controller. IP addresses provided via ipAddrs,
+	// however, are intended to allow explicit assignment of a machine's IP address. If both
+	// addressesFromPool and ipAddrs are empty or not defined, DHCP will assign an IP address.
+	// If both ipAddrs and addressesFromPools are defined, the IP addresses associated with
+	// ipAddrs will be applied first, followed by IP addresses from addressesFromPools.
+	// +kubebuilder:validation:Format=ipv4
+	// +optional
+	AddressesFromPools []AddressesFromPool `json:"addressesFromPools,omitempty"`
+}
+
+// Workspace defines a workspace configuration for the vSphere cloud
+// provider.
+type Workspace struct {
+	// Server is the IP address or FQDN of the vSphere endpoint.
+	// +optional
+	Server string `gcfg:"server,omitempty" json:"server,omitempty"`
+	// Datacenter is the datacenter in which VMs are created/located.
+	// +optional
+	Datacenter string `gcfg:"datacenter,omitempty" json:"datacenter,omitempty"`
+	// Folder is the folder in which VMs are created/located.
+ // +optional + Folder string `gcfg:"folder,omitempty" json:"folder,omitempty"` + // Datastore is the datastore in which VMs are created/located. + // +optional + Datastore string `gcfg:"default-datastore,omitempty" json:"datastore,omitempty"` + // ResourcePool is the resource pool in which VMs are created/located. + // +optional + ResourcePool string `gcfg:"resourcepool-path,omitempty" json:"resourcePool,omitempty"` +} + +// VSphereMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. +// It contains VSphere-specific status information. +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type VSphereMachineProviderStatus struct { + metav1.TypeMeta `json:",inline"` + + // InstanceID is the ID of the instance in VSphere + // +optional + InstanceID *string `json:"instanceId,omitempty"` + // InstanceState is the provisioning state of the VSphere Instance. + // +optional + InstanceState *string `json:"instanceState,omitempty"` + // Conditions is a set of conditions associated with the Machine to indicate + // errors or other status + Conditions []metav1.Condition `json:"conditions,omitempty"` + // TaskRef is a managed object reference to a Task related to the machine. + // This value is set automatically at runtime and should not be set or + // modified by users. + // +optional + TaskRef string `json:"taskRef,omitempty"` +} diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..4cca127cb --- /dev/null +++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,1861 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
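+//
+// The generated methods below all follow the same trio: DeepCopyInto for
+// copying into an existing value, DeepCopy for allocating a fresh copy, and
+// DeepCopyObject for satisfying runtime.Object. A minimal usage sketch
+// (editorial; variable names are assumptions):
+//
+//	src := &MachineHealthCheck{}
+//	dst := src.DeepCopy()        // independent *MachineHealthCheck
+//	var obj runtime.Object = src
+//	cp := obj.DeepCopyObject()   // deep copy via the interface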
+func (in *AWSMachineProviderConfig) DeepCopyInto(out *AWSMachineProviderConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.AMI.DeepCopyInto(&out.AMI) + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]TagSpecification, len(*in)) + copy(*out, *in) + } + if in.IAMInstanceProfile != nil { + in, out := &in.IAMInstanceProfile, &out.IAMInstanceProfile + *out = new(AWSResourceReference) + (*in).DeepCopyInto(*out) + } + if in.UserDataSecret != nil { + in, out := &in.UserDataSecret, &out.UserDataSecret + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.CredentialsSecret != nil { + in, out := &in.CredentialsSecret, &out.CredentialsSecret + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.KeyName != nil { + in, out := &in.KeyName, &out.KeyName + *out = new(string) + **out = **in + } + if in.PublicIP != nil { + in, out := &in.PublicIP, &out.PublicIP + *out = new(bool) + **out = **in + } + if in.SecurityGroups != nil { + in, out := &in.SecurityGroups, &out.SecurityGroups + *out = make([]AWSResourceReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Subnet.DeepCopyInto(&out.Subnet) + out.Placement = in.Placement + if in.LoadBalancers != nil { + in, out := &in.LoadBalancers, &out.LoadBalancers + *out = make([]LoadBalancerReference, len(*in)) + copy(*out, *in) + } + if in.BlockDevices != nil { + in, out := &in.BlockDevices, &out.BlockDevices + *out = make([]BlockDeviceMappingSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SpotMarketOptions != nil { + in, out := &in.SpotMarketOptions, &out.SpotMarketOptions + *out = new(SpotMarketOptions) + (*in).DeepCopyInto(*out) + } + out.MetadataServiceOptions = in.MetadataServiceOptions + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineProviderConfig. +func (in *AWSMachineProviderConfig) DeepCopy() *AWSMachineProviderConfig { + if in == nil { + return nil + } + out := new(AWSMachineProviderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSMachineProviderConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSMachineProviderConfigList) DeepCopyInto(out *AWSMachineProviderConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AWSMachineProviderConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineProviderConfigList. +func (in *AWSMachineProviderConfigList) DeepCopy() *AWSMachineProviderConfigList { + if in == nil { + return nil + } + out := new(AWSMachineProviderConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AWSMachineProviderStatus) DeepCopyInto(out *AWSMachineProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.InstanceState != nil { + in, out := &in.InstanceState, &out.InstanceState + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSMachineProviderStatus. +func (in *AWSMachineProviderStatus) DeepCopy() *AWSMachineProviderStatus { + if in == nil { + return nil + } + out := new(AWSMachineProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSResourceReference) DeepCopyInto(out *AWSResourceReference) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = new(string) + **out = **in + } + if in.ARN != nil { + in, out := &in.ARN, &out.ARN + *out = new(string) + **out = **in + } + if in.Filters != nil { + in, out := &in.Filters, &out.Filters + *out = make([]Filter, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSResourceReference. +func (in *AWSResourceReference) DeepCopy() *AWSResourceReference { + if in == nil { + return nil + } + out := new(AWSResourceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AddressesFromPool) DeepCopyInto(out *AddressesFromPool) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddressesFromPool. +func (in *AddressesFromPool) DeepCopy() *AddressesFromPool { + if in == nil { + return nil + } + out := new(AddressesFromPool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureBootDiagnostics) DeepCopyInto(out *AzureBootDiagnostics) { + *out = *in + if in.CustomerManaged != nil { + in, out := &in.CustomerManaged, &out.CustomerManaged + *out = new(AzureCustomerManagedBootDiagnostics) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureBootDiagnostics. +func (in *AzureBootDiagnostics) DeepCopy() *AzureBootDiagnostics { + if in == nil { + return nil + } + out := new(AzureBootDiagnostics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureCustomerManagedBootDiagnostics) DeepCopyInto(out *AzureCustomerManagedBootDiagnostics) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureCustomerManagedBootDiagnostics. 
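+//
+// A note on the recurring idiom in these functions (editorial sketch; T stands
+// for an arbitrary field type): for a non-nil pointer field the generator
+// shadows in and out so every copy reads the same way:
+//
+//	if in.Field != nil {
+//		in, out := &in.Field, &out.Field // both are **T inside this block
+//		*out = new(T)
+//		**out = **in // plain value copy; nested deep types use DeepCopyInto
+//	}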
+func (in *AzureCustomerManagedBootDiagnostics) DeepCopy() *AzureCustomerManagedBootDiagnostics { + if in == nil { + return nil + } + out := new(AzureCustomerManagedBootDiagnostics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureDiagnostics) DeepCopyInto(out *AzureDiagnostics) { + *out = *in + if in.Boot != nil { + in, out := &in.Boot, &out.Boot + *out = new(AzureBootDiagnostics) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDiagnostics. +func (in *AzureDiagnostics) DeepCopy() *AzureDiagnostics { + if in == nil { + return nil + } + out := new(AzureDiagnostics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureMachineProviderSpec) DeepCopyInto(out *AzureMachineProviderSpec) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.UserDataSecret != nil { + in, out := &in.UserDataSecret, &out.UserDataSecret + *out = new(v1.SecretReference) + **out = **in + } + if in.CredentialsSecret != nil { + in, out := &in.CredentialsSecret, &out.CredentialsSecret + *out = new(v1.SecretReference) + **out = **in + } + out.Image = in.Image + in.OSDisk.DeepCopyInto(&out.OSDisk) + if in.DataDisks != nil { + in, out := &in.DataDisks, &out.DataDisks + *out = make([]DataDisk, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ApplicationSecurityGroups != nil { + in, out := &in.ApplicationSecurityGroups, &out.ApplicationSecurityGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NatRule != nil { + in, out := &in.NatRule, &out.NatRule + *out = new(int64) + **out = **in + } + if in.Zone != nil { + in, out := &in.Zone, &out.Zone + *out = new(string) + **out = **in + } + if in.SpotVMOptions != nil { + in, out := &in.SpotVMOptions, &out.SpotVMOptions + *out = new(SpotVMOptions) + (*in).DeepCopyInto(*out) + } + if in.SecurityProfile != nil { + in, out := &in.SecurityProfile, &out.SecurityProfile + *out = new(SecurityProfile) + (*in).DeepCopyInto(*out) + } + in.Diagnostics.DeepCopyInto(&out.Diagnostics) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachineProviderSpec. +func (in *AzureMachineProviderSpec) DeepCopy() *AzureMachineProviderSpec { + if in == nil { + return nil + } + out := new(AzureMachineProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AzureMachineProviderSpec) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AzureMachineProviderStatus) DeepCopyInto(out *AzureMachineProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.VMID != nil { + in, out := &in.VMID, &out.VMID + *out = new(string) + **out = **in + } + if in.VMState != nil { + in, out := &in.VMState, &out.VMState + *out = new(AzureVMState) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureMachineProviderStatus. +func (in *AzureMachineProviderStatus) DeepCopy() *AzureMachineProviderStatus { + if in == nil { + return nil + } + out := new(AzureMachineProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BlockDeviceMappingSpec) DeepCopyInto(out *BlockDeviceMappingSpec) { + *out = *in + if in.DeviceName != nil { + in, out := &in.DeviceName, &out.DeviceName + *out = new(string) + **out = **in + } + if in.EBS != nil { + in, out := &in.EBS, &out.EBS + *out = new(EBSBlockDeviceSpec) + (*in).DeepCopyInto(*out) + } + if in.NoDevice != nil { + in, out := &in.NoDevice, &out.NoDevice + *out = new(string) + **out = **in + } + if in.VirtualName != nil { + in, out := &in.VirtualName, &out.VirtualName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlockDeviceMappingSpec. +func (in *BlockDeviceMappingSpec) DeepCopy() *BlockDeviceMappingSpec { + if in == nil { + return nil + } + out := new(BlockDeviceMappingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Condition) DeepCopyInto(out *Condition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. +func (in *Condition) DeepCopy() *Condition { + if in == nil { + return nil + } + out := new(Condition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Conditions) DeepCopyInto(out *Conditions) { + { + in := &in + *out = make(Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Conditions. +func (in Conditions) DeepCopy() Conditions { + if in == nil { + return nil + } + out := new(Conditions) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfidentialVM) DeepCopyInto(out *ConfidentialVM) { + *out = *in + out.UEFISettings = in.UEFISettings + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfidentialVM. +func (in *ConfidentialVM) DeepCopy() *ConfidentialVM { + if in == nil { + return nil + } + out := new(ConfidentialVM) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DataDisk) DeepCopyInto(out *DataDisk) { + *out = *in + in.ManagedDisk.DeepCopyInto(&out.ManagedDisk) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataDisk. +func (in *DataDisk) DeepCopy() *DataDisk { + if in == nil { + return nil + } + out := new(DataDisk) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DataDiskManagedDiskParameters) DeepCopyInto(out *DataDiskManagedDiskParameters) { + *out = *in + if in.DiskEncryptionSet != nil { + in, out := &in.DiskEncryptionSet, &out.DiskEncryptionSet + *out = new(DiskEncryptionSetParameters) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataDiskManagedDiskParameters. +func (in *DataDiskManagedDiskParameters) DeepCopy() *DataDiskManagedDiskParameters { + if in == nil { + return nil + } + out := new(DataDiskManagedDiskParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskEncryptionSetParameters) DeepCopyInto(out *DiskEncryptionSetParameters) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskEncryptionSetParameters. +func (in *DiskEncryptionSetParameters) DeepCopy() *DiskEncryptionSetParameters { + if in == nil { + return nil + } + out := new(DiskEncryptionSetParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DiskSettings) DeepCopyInto(out *DiskSettings) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DiskSettings. +func (in *DiskSettings) DeepCopy() *DiskSettings { + if in == nil { + return nil + } + out := new(DiskSettings) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EBSBlockDeviceSpec) DeepCopyInto(out *EBSBlockDeviceSpec) { + *out = *in + if in.DeleteOnTermination != nil { + in, out := &in.DeleteOnTermination, &out.DeleteOnTermination + *out = new(bool) + **out = **in + } + if in.Encrypted != nil { + in, out := &in.Encrypted, &out.Encrypted + *out = new(bool) + **out = **in + } + in.KMSKey.DeepCopyInto(&out.KMSKey) + if in.Iops != nil { + in, out := &in.Iops, &out.Iops + *out = new(int64) + **out = **in + } + if in.VolumeSize != nil { + in, out := &in.VolumeSize, &out.VolumeSize + *out = new(int64) + **out = **in + } + if in.VolumeType != nil { + in, out := &in.VolumeType, &out.VolumeType + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EBSBlockDeviceSpec. +func (in *EBSBlockDeviceSpec) DeepCopy() *EBSBlockDeviceSpec { + if in == nil { + return nil + } + out := new(EBSBlockDeviceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Filter) DeepCopyInto(out *Filter) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Filter. +func (in *Filter) DeepCopy() *Filter { + if in == nil { + return nil + } + out := new(Filter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPDisk) DeepCopyInto(out *GCPDisk) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.EncryptionKey != nil { + in, out := &in.EncryptionKey, &out.EncryptionKey + *out = new(GCPEncryptionKeyReference) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPDisk. +func (in *GCPDisk) DeepCopy() *GCPDisk { + if in == nil { + return nil + } + out := new(GCPDisk) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPEncryptionKeyReference) DeepCopyInto(out *GCPEncryptionKeyReference) { + *out = *in + if in.KMSKey != nil { + in, out := &in.KMSKey, &out.KMSKey + *out = new(GCPKMSKeyReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPEncryptionKeyReference. +func (in *GCPEncryptionKeyReference) DeepCopy() *GCPEncryptionKeyReference { + if in == nil { + return nil + } + out := new(GCPEncryptionKeyReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPGPUConfig) DeepCopyInto(out *GCPGPUConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPGPUConfig. +func (in *GCPGPUConfig) DeepCopy() *GCPGPUConfig { + if in == nil { + return nil + } + out := new(GCPGPUConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPKMSKeyReference) DeepCopyInto(out *GCPKMSKeyReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPKMSKeyReference. +func (in *GCPKMSKeyReference) DeepCopy() *GCPKMSKeyReference { + if in == nil { + return nil + } + out := new(GCPKMSKeyReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GCPMachineProviderSpec) DeepCopyInto(out *GCPMachineProviderSpec) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.UserDataSecret != nil { + in, out := &in.UserDataSecret, &out.UserDataSecret + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.CredentialsSecret != nil { + in, out := &in.CredentialsSecret, &out.CredentialsSecret + *out = new(v1.LocalObjectReference) + **out = **in + } + if in.Disks != nil { + in, out := &in.Disks, &out.Disks + *out = make([]*GCPDisk, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(GCPDisk) + (*in).DeepCopyInto(*out) + } + } + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make([]*GCPMetadata, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(GCPMetadata) + (*in).DeepCopyInto(*out) + } + } + } + if in.NetworkInterfaces != nil { + in, out := &in.NetworkInterfaces, &out.NetworkInterfaces + *out = make([]*GCPNetworkInterface, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(GCPNetworkInterface) + **out = **in + } + } + } + if in.ServiceAccounts != nil { + in, out := &in.ServiceAccounts, &out.ServiceAccounts + *out = make([]GCPServiceAccount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.TargetPools != nil { + in, out := &in.TargetPools, &out.TargetPools + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.GPUs != nil { + in, out := &in.GPUs, &out.GPUs + *out = make([]GCPGPUConfig, len(*in)) + copy(*out, *in) + } + out.ShieldedInstanceConfig = in.ShieldedInstanceConfig + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPMachineProviderSpec. +func (in *GCPMachineProviderSpec) DeepCopy() *GCPMachineProviderSpec { + if in == nil { + return nil + } + out := new(GCPMachineProviderSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GCPMachineProviderSpec) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPMachineProviderStatus) DeepCopyInto(out *GCPMachineProviderStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.InstanceID != nil { + in, out := &in.InstanceID, &out.InstanceID + *out = new(string) + **out = **in + } + if in.InstanceState != nil { + in, out := &in.InstanceState, &out.InstanceState + *out = new(string) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]metav1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPMachineProviderStatus. 
+func (in *GCPMachineProviderStatus) DeepCopy() *GCPMachineProviderStatus { + if in == nil { + return nil + } + out := new(GCPMachineProviderStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPMetadata) DeepCopyInto(out *GCPMetadata) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPMetadata. +func (in *GCPMetadata) DeepCopy() *GCPMetadata { + if in == nil { + return nil + } + out := new(GCPMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPNetworkInterface) DeepCopyInto(out *GCPNetworkInterface) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPNetworkInterface. +func (in *GCPNetworkInterface) DeepCopy() *GCPNetworkInterface { + if in == nil { + return nil + } + out := new(GCPNetworkInterface) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPServiceAccount) DeepCopyInto(out *GCPServiceAccount) { + *out = *in + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPServiceAccount. +func (in *GCPServiceAccount) DeepCopy() *GCPServiceAccount { + if in == nil { + return nil + } + out := new(GCPServiceAccount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPShieldedInstanceConfig) DeepCopyInto(out *GCPShieldedInstanceConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPShieldedInstanceConfig. +func (in *GCPShieldedInstanceConfig) DeepCopy() *GCPShieldedInstanceConfig { + if in == nil { + return nil + } + out := new(GCPShieldedInstanceConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Image) DeepCopyInto(out *Image) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Image. +func (in *Image) DeepCopy() *Image { + if in == nil { + return nil + } + out := new(Image) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LastOperation) DeepCopyInto(out *LastOperation) { + *out = *in + if in.Description != nil { + in, out := &in.Description, &out.Description + *out = new(string) + **out = **in + } + if in.LastUpdated != nil { + in, out := &in.LastUpdated, &out.LastUpdated + *out = (*in).DeepCopy() + } + if in.State != nil { + in, out := &in.State, &out.State + *out = new(string) + **out = **in + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastOperation. 
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LastOperation.
+func (in *LastOperation) DeepCopy() *LastOperation {
+	if in == nil {
+		return nil
+	}
+	out := new(LastOperation)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LifecycleHook) DeepCopyInto(out *LifecycleHook) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleHook.
+func (in *LifecycleHook) DeepCopy() *LifecycleHook {
+	if in == nil {
+		return nil
+	}
+	out := new(LifecycleHook)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LifecycleHooks) DeepCopyInto(out *LifecycleHooks) {
+	*out = *in
+	if in.PreDrain != nil {
+		in, out := &in.PreDrain, &out.PreDrain
+		*out = make([]LifecycleHook, len(*in))
+		copy(*out, *in)
+	}
+	if in.PreTerminate != nil {
+		in, out := &in.PreTerminate, &out.PreTerminate
+		*out = make([]LifecycleHook, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LifecycleHooks.
+func (in *LifecycleHooks) DeepCopy() *LifecycleHooks {
+	if in == nil {
+		return nil
+	}
+	out := new(LifecycleHooks)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *LoadBalancerReference) DeepCopyInto(out *LoadBalancerReference) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerReference.
+func (in *LoadBalancerReference) DeepCopy() *LoadBalancerReference {
+	if in == nil {
+		return nil
+	}
+	out := new(LoadBalancerReference)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Machine) DeepCopyInto(out *Machine) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Machine.
+func (in *Machine) DeepCopy() *Machine {
+	if in == nil {
+		return nil
+	}
+	out := new(Machine)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Machine) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineHealthCheck) DeepCopyInto(out *MachineHealthCheck) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineHealthCheck.
+func (in *MachineHealthCheck) DeepCopy() *MachineHealthCheck {
+	if in == nil {
+		return nil
+	}
+	out := new(MachineHealthCheck)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MachineHealthCheck) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineHealthCheckList) DeepCopyInto(out *MachineHealthCheckList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]MachineHealthCheck, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineHealthCheckList.
+func (in *MachineHealthCheckList) DeepCopy() *MachineHealthCheckList {
+	if in == nil {
+		return nil
+	}
+	out := new(MachineHealthCheckList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MachineHealthCheckList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineHealthCheckSpec) DeepCopyInto(out *MachineHealthCheckSpec) {
+	*out = *in
+	in.Selector.DeepCopyInto(&out.Selector)
+	if in.UnhealthyConditions != nil {
+		in, out := &in.UnhealthyConditions, &out.UnhealthyConditions
+		*out = make([]UnhealthyCondition, len(*in))
+		copy(*out, *in)
+	}
+	if in.MaxUnhealthy != nil {
+		in, out := &in.MaxUnhealthy, &out.MaxUnhealthy
+		*out = new(intstr.IntOrString)
+		**out = **in
+	}
+	if in.NodeStartupTimeout != nil {
+		in, out := &in.NodeStartupTimeout, &out.NodeStartupTimeout
+		*out = new(metav1.Duration)
+		**out = **in
+	}
+	if in.RemediationTemplate != nil {
+		in, out := &in.RemediationTemplate, &out.RemediationTemplate
+		*out = new(v1.ObjectReference)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineHealthCheckSpec.
+func (in *MachineHealthCheckSpec) DeepCopy() *MachineHealthCheckSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(MachineHealthCheckSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineHealthCheckStatus) DeepCopyInto(out *MachineHealthCheckStatus) {
+	*out = *in
+	if in.ExpectedMachines != nil {
+		in, out := &in.ExpectedMachines, &out.ExpectedMachines
+		*out = new(int)
+		**out = **in
+	}
+	if in.CurrentHealthy != nil {
+		in, out := &in.CurrentHealthy, &out.CurrentHealthy
+		*out = new(int)
+		**out = **in
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make(Conditions, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineHealthCheckStatus.
+func (in *MachineHealthCheckStatus) DeepCopy() *MachineHealthCheckStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(MachineHealthCheckStatus)
+	in.DeepCopyInto(out)
+	return out
+}
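Editorial note: DeepCopyObject is what lets these types satisfy the apimachinery runtime.Object interface, which is how controllers copy an object read from a shared informer cache before mutating it. The sketch below uses a local Object interface as a simplified stand-in for k8s.io/apimachinery's runtime.Object (the real interface additionally requires GetObjectKind); the Machine struct and mutate helper here are hypothetical.

package main

import "fmt"

// Object is a simplified stand-in for runtime.Object.
type Object interface {
	DeepCopyObject() Object
}

type Machine struct{ Name string }

func (in *Machine) DeepCopyObject() Object {
	if in == nil {
		return nil
	}
	out := *in
	return &out
}

// mutate copies an Object and edits only the copy -- the pattern controllers
// use so objects in a shared cache stay pristine.
func mutate(obj Object) Object {
	c := obj.DeepCopyObject()
	if m, ok := c.(*Machine); ok {
		m.Name = m.Name + "-updated"
	}
	return c
}

func main() {
	orig := &Machine{Name: "worker-0"}
	updated := mutate(orig)
	fmt.Println(orig.Name, updated.(*Machine).Name) // worker-0 worker-0-updated
}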
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineList) DeepCopyInto(out *MachineList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Machine, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineList.
+func (in *MachineList) DeepCopy() *MachineList {
+	if in == nil {
+		return nil
+	}
+	out := new(MachineList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MachineList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineSet) DeepCopyInto(out *MachineSet) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSet.
+func (in *MachineSet) DeepCopy() *MachineSet {
+	if in == nil {
+		return nil
+	}
+	out := new(MachineSet)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MachineSet) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineSetList) DeepCopyInto(out *MachineSetList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]MachineSet, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetList.
+func (in *MachineSetList) DeepCopy() *MachineSetList {
+	if in == nil {
+		return nil
+	}
+	out := new(MachineSetList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MachineSetList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineSetSpec) DeepCopyInto(out *MachineSetSpec) {
+	*out = *in
+	if in.Replicas != nil {
+		in, out := &in.Replicas, &out.Replicas
+		*out = new(int32)
+		**out = **in
+	}
+	in.Selector.DeepCopyInto(&out.Selector)
+	in.Template.DeepCopyInto(&out.Template)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetSpec.
+func (in *MachineSetSpec) DeepCopy() *MachineSetSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(MachineSetSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineSetStatus) DeepCopyInto(out *MachineSetStatus) {
+	*out = *in
+	if in.ErrorReason != nil {
+		in, out := &in.ErrorReason, &out.ErrorReason
+		*out = new(MachineSetStatusError)
+		**out = **in
+	}
+	if in.ErrorMessage != nil {
+		in, out := &in.ErrorMessage, &out.ErrorMessage
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSetStatus.
+func (in *MachineSetStatus) DeepCopy() *MachineSetStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(MachineSetStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineSpec) DeepCopyInto(out *MachineSpec) {
+	*out = *in
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.LifecycleHooks.DeepCopyInto(&out.LifecycleHooks)
+	if in.Taints != nil {
+		in, out := &in.Taints, &out.Taints
+		*out = make([]v1.Taint, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	in.ProviderSpec.DeepCopyInto(&out.ProviderSpec)
+	if in.ProviderID != nil {
+		in, out := &in.ProviderID, &out.ProviderID
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineSpec.
+func (in *MachineSpec) DeepCopy() *MachineSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(MachineSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineStatus) DeepCopyInto(out *MachineStatus) {
+	*out = *in
+	if in.NodeRef != nil {
+		in, out := &in.NodeRef, &out.NodeRef
+		*out = new(v1.ObjectReference)
+		**out = **in
+	}
+	if in.LastUpdated != nil {
+		in, out := &in.LastUpdated, &out.LastUpdated
+		*out = (*in).DeepCopy()
+	}
+	if in.ErrorReason != nil {
+		in, out := &in.ErrorReason, &out.ErrorReason
+		*out = new(MachineStatusError)
+		**out = **in
+	}
+	if in.ErrorMessage != nil {
+		in, out := &in.ErrorMessage, &out.ErrorMessage
+		*out = new(string)
+		**out = **in
+	}
+	if in.ProviderStatus != nil {
+		in, out := &in.ProviderStatus, &out.ProviderStatus
+		*out = new(runtime.RawExtension)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Addresses != nil {
+		in, out := &in.Addresses, &out.Addresses
+		*out = make([]v1.NodeAddress, len(*in))
+		copy(*out, *in)
+	}
+	if in.LastOperation != nil {
+		in, out := &in.LastOperation, &out.LastOperation
+		*out = new(LastOperation)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Phase != nil {
+		in, out := &in.Phase, &out.Phase
+		*out = new(string)
+		**out = **in
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make(Conditions, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineStatus.
+func (in *MachineStatus) DeepCopy() *MachineStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(MachineStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MachineTemplateSpec) DeepCopyInto(out *MachineTemplateSpec) {
+	*out = *in
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	return
+}
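Editorial note: the generated code above uses two different slice strategies. Slices of pointer-free value types (Tags, TargetPools, Addresses) get a plain copy(), which is safe because copying the elements copies everything they own; slices whose elements contain pointers (Taints, Conditions) get an element-wise DeepCopyInto loop. A minimal sketch of why, with a made-up Condition type (not the API's):

package main

import "fmt"

type Condition struct{ Msg *string }

func main() {
	m := "ready"
	src := []Condition{{Msg: &m}}

	// Shallow: copy() duplicates the structs, but both slices still share *Msg.
	shallow := make([]Condition, len(src))
	copy(shallow, src)

	// Deep: allocate a new string per element, as the generated code does for
	// pointer-bearing elements.
	deep := make([]Condition, len(src))
	for i := range src {
		if src[i].Msg != nil {
			v := *src[i].Msg
			deep[i].Msg = &v
		}
	}

	*src[0].Msg = "degraded"
	fmt.Println(*shallow[0].Msg, *deep[0].Msg) // degraded ready
}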
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineTemplateSpec.
+func (in *MachineTemplateSpec) DeepCopy() *MachineTemplateSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(MachineTemplateSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MetadataServiceOptions) DeepCopyInto(out *MetadataServiceOptions) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetadataServiceOptions.
+func (in *MetadataServiceOptions) DeepCopy() *MetadataServiceOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(MetadataServiceOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkDeviceSpec) DeepCopyInto(out *NetworkDeviceSpec) {
+	*out = *in
+	if in.IPAddrs != nil {
+		in, out := &in.IPAddrs, &out.IPAddrs
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.Nameservers != nil {
+		in, out := &in.Nameservers, &out.Nameservers
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.AddressesFromPools != nil {
+		in, out := &in.AddressesFromPools, &out.AddressesFromPools
+		*out = make([]AddressesFromPool, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkDeviceSpec.
+func (in *NetworkDeviceSpec) DeepCopy() *NetworkDeviceSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(NetworkDeviceSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) {
+	*out = *in
+	if in.Devices != nil {
+		in, out := &in.Devices, &out.Devices
+		*out = make([]NetworkDeviceSpec, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkSpec.
+func (in *NetworkSpec) DeepCopy() *NetworkSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(NetworkSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OSDisk) DeepCopyInto(out *OSDisk) {
+	*out = *in
+	in.ManagedDisk.DeepCopyInto(&out.ManagedDisk)
+	out.DiskSettings = in.DiskSettings
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSDisk.
+func (in *OSDisk) DeepCopy() *OSDisk {
+	if in == nil {
+		return nil
+	}
+	out := new(OSDisk)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *OSDiskManagedDiskParameters) DeepCopyInto(out *OSDiskManagedDiskParameters) {
+	*out = *in
+	if in.DiskEncryptionSet != nil {
+		in, out := &in.DiskEncryptionSet, &out.DiskEncryptionSet
+		*out = new(DiskEncryptionSetParameters)
+		**out = **in
+	}
+	out.SecurityProfile = in.SecurityProfile
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OSDiskManagedDiskParameters.
+func (in *OSDiskManagedDiskParameters) DeepCopy() *OSDiskManagedDiskParameters {
+	if in == nil {
+		return nil
+	}
+	out := new(OSDiskManagedDiskParameters)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) {
+	*out = *in
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Annotations != nil {
+		in, out := &in.Annotations, &out.Annotations
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.OwnerReferences != nil {
+		in, out := &in.OwnerReferences, &out.OwnerReferences
+		*out = make([]metav1.OwnerReference, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMeta.
+func (in *ObjectMeta) DeepCopy() *ObjectMeta {
+	if in == nil {
+		return nil
+	}
+	out := new(ObjectMeta)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Placement) DeepCopyInto(out *Placement) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Placement.
+func (in *Placement) DeepCopy() *Placement {
+	if in == nil {
+		return nil
+	}
+	out := new(Placement)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ProviderSpec) DeepCopyInto(out *ProviderSpec) {
+	*out = *in
+	if in.Value != nil {
+		in, out := &in.Value, &out.Value
+		*out = new(runtime.RawExtension)
+		(*in).DeepCopyInto(*out)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderSpec.
+func (in *ProviderSpec) DeepCopy() *ProviderSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ProviderSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecurityProfile) DeepCopyInto(out *SecurityProfile) {
+	*out = *in
+	if in.EncryptionAtHost != nil {
+		in, out := &in.EncryptionAtHost, &out.EncryptionAtHost
+		*out = new(bool)
+		**out = **in
+	}
+	in.Settings.DeepCopyInto(&out.Settings)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityProfile.
+func (in *SecurityProfile) DeepCopy() *SecurityProfile {
+	if in == nil {
+		return nil
+	}
+	out := new(SecurityProfile)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SecuritySettings) DeepCopyInto(out *SecuritySettings) {
+	*out = *in
+	if in.ConfidentialVM != nil {
+		in, out := &in.ConfidentialVM, &out.ConfidentialVM
+		*out = new(ConfidentialVM)
+		**out = **in
+	}
+	if in.TrustedLaunch != nil {
+		in, out := &in.TrustedLaunch, &out.TrustedLaunch
+		*out = new(TrustedLaunch)
+		**out = **in
+	}
+	return
+}
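Editorial note: ProviderSpec.Value above is a runtime.RawExtension, which is how the Machine API keeps the per-cloud spec opaque to the generic machinery: consumers unmarshal the raw bytes into the provider-specific struct when they need it. A sketch of that decode step under simplified, assumed types (rawExtension and gcpSpecSubset are illustrative stand-ins, not the real API structs):

package main

import (
	"encoding/json"
	"fmt"
)

// rawExtension mimics runtime.RawExtension's essential field: raw JSON bytes.
type rawExtension struct {
	Raw []byte
}

// gcpSpecSubset is a hypothetical, trimmed stand-in for GCPMachineProviderSpec.
type gcpSpecSubset struct {
	MachineType string `json:"machineType"`
	Zone        string `json:"zone"`
}

func main() {
	// A Machine's spec.providerSpec.value carries provider-specific config
	// as opaque JSON; typed consumers unmarshal it on demand.
	ps := rawExtension{Raw: []byte(`{"machineType":"n1-standard-4","zone":"us-central1-a"}`)}

	var spec gcpSpecSubset
	if err := json.Unmarshal(ps.Raw, &spec); err != nil {
		panic(err)
	}
	fmt.Println(spec.MachineType, spec.Zone)
}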
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecuritySettings.
+func (in *SecuritySettings) DeepCopy() *SecuritySettings {
+	if in == nil {
+		return nil
+	}
+	out := new(SecuritySettings)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SpotMarketOptions) DeepCopyInto(out *SpotMarketOptions) {
+	*out = *in
+	if in.MaxPrice != nil {
+		in, out := &in.MaxPrice, &out.MaxPrice
+		*out = new(string)
+		**out = **in
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotMarketOptions.
+func (in *SpotMarketOptions) DeepCopy() *SpotMarketOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(SpotMarketOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SpotVMOptions) DeepCopyInto(out *SpotVMOptions) {
+	*out = *in
+	if in.MaxPrice != nil {
+		in, out := &in.MaxPrice, &out.MaxPrice
+		x := (*in).DeepCopy()
+		*out = &x
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SpotVMOptions.
+func (in *SpotVMOptions) DeepCopy() *SpotVMOptions {
+	if in == nil {
+		return nil
+	}
+	out := new(SpotVMOptions)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TagSpecification) DeepCopyInto(out *TagSpecification) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TagSpecification.
+func (in *TagSpecification) DeepCopy() *TagSpecification {
+	if in == nil {
+		return nil
+	}
+	out := new(TagSpecification)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TrustedLaunch) DeepCopyInto(out *TrustedLaunch) {
+	*out = *in
+	out.UEFISettings = in.UEFISettings
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrustedLaunch.
+func (in *TrustedLaunch) DeepCopy() *TrustedLaunch {
+	if in == nil {
+		return nil
+	}
+	out := new(TrustedLaunch)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UEFISettings) DeepCopyInto(out *UEFISettings) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UEFISettings.
+func (in *UEFISettings) DeepCopy() *UEFISettings {
+	if in == nil {
+		return nil
+	}
+	out := new(UEFISettings)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *UnhealthyCondition) DeepCopyInto(out *UnhealthyCondition) {
+	*out = *in
+	out.Timeout = in.Timeout
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UnhealthyCondition.
+func (in *UnhealthyCondition) DeepCopy() *UnhealthyCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(UnhealthyCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VMDiskSecurityProfile) DeepCopyInto(out *VMDiskSecurityProfile) {
+	*out = *in
+	out.DiskEncryptionSet = in.DiskEncryptionSet
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VMDiskSecurityProfile.
+func (in *VMDiskSecurityProfile) DeepCopy() *VMDiskSecurityProfile {
+	if in == nil {
+		return nil
+	}
+	out := new(VMDiskSecurityProfile)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VSphereMachineProviderSpec) DeepCopyInto(out *VSphereMachineProviderSpec) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	if in.UserDataSecret != nil {
+		in, out := &in.UserDataSecret, &out.UserDataSecret
+		*out = new(v1.LocalObjectReference)
+		**out = **in
+	}
+	if in.CredentialsSecret != nil {
+		in, out := &in.CredentialsSecret, &out.CredentialsSecret
+		*out = new(v1.LocalObjectReference)
+		**out = **in
+	}
+	if in.Workspace != nil {
+		in, out := &in.Workspace, &out.Workspace
+		*out = new(Workspace)
+		**out = **in
+	}
+	in.Network.DeepCopyInto(&out.Network)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereMachineProviderSpec.
+func (in *VSphereMachineProviderSpec) DeepCopy() *VSphereMachineProviderSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(VSphereMachineProviderSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *VSphereMachineProviderSpec) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *VSphereMachineProviderStatus) DeepCopyInto(out *VSphereMachineProviderStatus) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	if in.InstanceID != nil {
+		in, out := &in.InstanceID, &out.InstanceID
+		*out = new(string)
+		**out = **in
+	}
+	if in.InstanceState != nil {
+		in, out := &in.InstanceState, &out.InstanceState
+		*out = new(string)
+		**out = **in
+	}
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]metav1.Condition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VSphereMachineProviderStatus.
+func (in *VSphereMachineProviderStatus) DeepCopy() *VSphereMachineProviderStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(VSphereMachineProviderStatus)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Workspace) DeepCopyInto(out *Workspace) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workspace.
+func (in *Workspace) DeepCopy() *Workspace {
+	if in == nil {
+		return nil
+	}
+	out := new(Workspace)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 000000000..108ba557c
--- /dev/null
+++ b/vendor/github.com/openshift/api/machine/v1beta1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,808 @@
+package v1beta1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
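Editorial note: the generated file that follows pairs each API type with a map from JSON field name to its doc string, exposed through a SwaggerDoc() method that go-restful's documentation tooling picks up by convention (the empty key holds the type-level description). A minimal sketch of how such maps are typically consumed; awsDoc, awsConfig, and SwaggerDocumented are illustrative names, not part of the vendored file:

package main

import "fmt"

// awsDoc is a hypothetical stand-in for one of the generated map_* variables.
var awsDoc = map[string]string{
	"":             "AWSMachineProviderConfig is the Schema for the awsmachineproviderconfigs API",
	"instanceType": "InstanceType is the type of instance to create. Example: m4.xlarge",
}

// SwaggerDocumented mirrors the convention the generated file follows: any
// type exposing SwaggerDoc() can feed field documentation to the doc generator.
type SwaggerDocumented interface {
	SwaggerDoc() map[string]string
}

type awsConfig struct{}

func (awsConfig) SwaggerDoc() map[string]string { return awsDoc }

func main() {
	var t SwaggerDocumented = awsConfig{}
	docs := t.SwaggerDoc()
	fmt.Println(docs[""])             // type-level description (empty key)
	fmt.Println(docs["instanceType"]) // per-field description
}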
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_AWSMachineProviderConfig = map[string]string{
+	"": "AWSMachineProviderConfig is the Schema for the awsmachineproviderconfigs API Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).",
+	"ami": "AMI is the reference to the AMI from which to create the machine instance.",
+	"instanceType": "InstanceType is the type of instance to create. Example: m4.xlarge",
+	"tags": "Tags is the set of tags to apply to an instance, in addition to the ones added by default by the actuator. These tags are additive. The actuator will ensure these tags are present, but will not remove any other tags that may exist on the instance.",
+	"iamInstanceProfile": "IAMInstanceProfile is a reference to an IAM role to assign to the instance",
+	"userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance",
+	"credentialsSecret": "CredentialsSecret is a reference to the secret with AWS credentials. Otherwise, defaults to permissions provided by attached IAM role where the actuator is running.",
+	"keyName": "KeyName is the name of the KeyPair to use for SSH",
+	"deviceIndex": "DeviceIndex is the index of the device on the instance for the network interface attachment. Defaults to 0.",
+	"publicIp": "PublicIP specifies whether the instance should get a public IP. If not present, it should use the default of its subnet.",
+	"networkInterfaceType": "NetworkInterfaceType specifies the type of network interface to be used for the primary network interface. Valid values are \"ENA\", \"EFA\", and omitted, which means no opinion and the platform chooses a good default which may change over time. The current default value is \"ENA\". Please visit https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/efa.html to learn more about the AWS Elastic Fabric Adapter interface option.",
+	"securityGroups": "SecurityGroups is an array of references to security groups that should be applied to the instance.",
+	"subnet": "Subnet is a reference to the subnet to use for this instance",
+	"placement": "Placement specifies where to create the instance in AWS",
+	"loadBalancers": "LoadBalancers is the set of load balancers to which the new instance should be added once it is created.",
+	"blockDevices": "BlockDevices is the set of block device mappings associated with this instance; a block device without a name will be used as the root device, and only one device without a name is allowed https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html",
+	"spotMarketOptions": "SpotMarketOptions allows users to configure instances to be run using AWS Spot instances.",
+	"metadataServiceOptions": "MetadataServiceOptions allows users to configure instance metadata service interaction options. If nothing specified, default AWS IMDS settings will be applied. https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html",
+	"placementGroupName": "PlacementGroupName specifies the name of the placement group in which to launch the instance. The placement group must already be created and may use any placement strategy. When omitted, no placement group is used when creating the EC2 instance.",
+}
+
+func (AWSMachineProviderConfig) SwaggerDoc() map[string]string {
+	return map_AWSMachineProviderConfig
+}
+
+var map_AWSMachineProviderConfigList = map[string]string{
+	"": "AWSMachineProviderConfigList contains a list of AWSMachineProviderConfig Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).",
+}
+
+func (AWSMachineProviderConfigList) SwaggerDoc() map[string]string {
+	return map_AWSMachineProviderConfigList
+}
+
+var map_AWSMachineProviderStatus = map[string]string{
+	"": "AWSMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains AWS-specific status information. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).",
+	"instanceId": "InstanceID is the instance ID of the machine created in AWS",
+	"instanceState": "InstanceState is the state of the AWS instance for this machine",
+	"conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status",
+}
+
+func (AWSMachineProviderStatus) SwaggerDoc() map[string]string {
+	return map_AWSMachineProviderStatus
+}
+
+var map_AWSResourceReference = map[string]string{
+	"": "AWSResourceReference is a reference to a specific AWS resource by ID, ARN, or filters. Only one of ID, ARN or Filters may be specified. Specifying more than one will result in a validation error.",
+	"id": "ID of resource",
+	"arn": "ARN of resource",
+	"filters": "Filters is a set of filters used to identify a resource",
+}
+
+func (AWSResourceReference) SwaggerDoc() map[string]string {
+	return map_AWSResourceReference
+}
+
+var map_BlockDeviceMappingSpec = map[string]string{
+	"": "BlockDeviceMappingSpec describes a block device mapping",
+	"deviceName": "The device name exposed to the machine (for example, /dev/sdh or xvdh).",
+	"ebs": "Parameters used to automatically set up EBS volumes when the machine is launched.",
+	"noDevice": "Suppresses the specified device included in the block device mapping of the AMI.",
+	"virtualName": "The virtual device name (ephemeralN). Machine store volumes are numbered starting from 0. A machine type with 2 available machine store volumes can specify mappings for ephemeral0 and ephemeral1. The number of available machine store volumes depends on the machine type. After you connect to the machine, you must mount the volume.\n\nConstraints: For M3 machines, you must specify machine store volumes in the block device mapping for the machine. When you launch an M3 machine, we ignore any machine store volumes specified in the block device mapping for the AMI.",
+}
+
+func (BlockDeviceMappingSpec) SwaggerDoc() map[string]string {
+	return map_BlockDeviceMappingSpec
+}
+
+var map_EBSBlockDeviceSpec = map[string]string{
+	"": "EBSBlockDeviceSpec describes a block device for an EBS volume. https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/EbsBlockDevice",
+	"deleteOnTermination": "Indicates whether the EBS volume is deleted on machine termination.",
+	"encrypted": "Indicates whether the EBS volume is encrypted. Encrypted Amazon EBS volumes may only be attached to machines that support Amazon EBS encryption.",
+	"kmsKey": "Indicates the KMS key that should be used to encrypt the Amazon EBS volume.",
+	"iops": "The number of I/O operations per second (IOPS) that the volume supports. For io1, this represents the number of IOPS that are provisioned for the volume. For gp2, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting. For more information about General Purpose SSD baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) in the Amazon Elastic Compute Cloud User Guide.\n\nMinimal and maximal IOPS for io1 and gp2 are constrained. Please, check https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html for precise boundaries for individual volumes.\n\nCondition: This parameter is required for requests to create io1 volumes; it is not used in requests to create gp2, st1, sc1, or standard volumes.",
+	"volumeSize": "The size of the volume, in GiB.\n\nConstraints: 1-16384 for General Purpose SSD (gp2), 4-16384 for Provisioned IOPS SSD (io1), 500-16384 for Throughput Optimized HDD (st1), 500-16384 for Cold HDD (sc1), and 1-1024 for Magnetic (standard) volumes. If you specify a snapshot, the volume size must be equal to or larger than the snapshot size.\n\nDefault: If you're creating the volume from a snapshot and don't specify a volume size, the default is the snapshot size.",
+	"volumeType": "The volume type: gp2, io1, st1, sc1, or standard. Default: standard",
+}
+
+func (EBSBlockDeviceSpec) SwaggerDoc() map[string]string {
+	return map_EBSBlockDeviceSpec
+}
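Editorial note: the volumeSize documentation above pins per-type GiB ranges. A small sketch that encodes just those quoted bounds; validVolumeSize and volumeSizeBounds are hypothetical helpers for illustration, not part of the API:

package main

import "fmt"

// volumeSizeBounds encodes the GiB ranges quoted in the volumeSize doc above.
var volumeSizeBounds = map[string][2]int64{
	"gp2":      {1, 16384},
	"io1":      {4, 16384},
	"st1":      {500, 16384},
	"sc1":      {500, 16384},
	"standard": {1, 1024},
}

func validVolumeSize(volumeType string, sizeGiB int64) bool {
	b, ok := volumeSizeBounds[volumeType]
	if !ok {
		return false
	}
	return sizeGiB >= b[0] && sizeGiB <= b[1]
}

func main() {
	fmt.Println(validVolumeSize("gp2", 120))       // true
	fmt.Println(validVolumeSize("standard", 2048)) // false: magnetic tops out at 1024
}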
+
+var map_Filter = map[string]string{
+	"": "Filter is a filter used to identify an AWS resource",
+	"name": "Name of the filter. Filter names are case-sensitive.",
+	"values": "Values includes one or more filter values. Filter values are case-sensitive.",
+}
+
+func (Filter) SwaggerDoc() map[string]string {
+	return map_Filter
+}
+
+var map_LoadBalancerReference = map[string]string{
+	"": "LoadBalancerReference is a reference to a load balancer on AWS.",
+}
+
+func (LoadBalancerReference) SwaggerDoc() map[string]string {
+	return map_LoadBalancerReference
+}
+
+var map_MetadataServiceOptions = map[string]string{
+	"": "MetadataServiceOptions defines the options available to a user when configuring Instance Metadata Service (IMDS) Options.",
+	"authentication": "Authentication determines whether or not the host requires the use of authentication when interacting with the metadata service. When using authentication, this enforces v2 interaction method (IMDSv2) with the metadata service. When omitted, this means the user has no opinion and the value is left to the platform to choose a good default, which is subject to change over time. The current default is optional. At this point this field represents `HttpTokens` parameter from `InstanceMetadataOptionsRequest` structure in AWS EC2 API https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_InstanceMetadataOptionsRequest.html",
+}
+
+func (MetadataServiceOptions) SwaggerDoc() map[string]string {
+	return map_MetadataServiceOptions
+}
+
+var map_Placement = map[string]string{
+	"": "Placement indicates where to create the instance in AWS",
+	"region": "Region is the region to use to create the instance",
+	"availabilityZone": "AvailabilityZone is the availability zone of the instance",
+	"tenancy": "Tenancy indicates if instance should run on shared or single-tenant hardware. Three options are supported: default, dedicated and host.",
+}
+
+func (Placement) SwaggerDoc() map[string]string {
+	return map_Placement
+}
+
+var map_SpotMarketOptions = map[string]string{
+	"": "SpotMarketOptions defines the options available to a user when configuring Machines to run on Spot instances. Most users should provide an empty struct.",
+	"maxPrice": "The maximum price the user is willing to pay for their instances. Default: On-Demand price",
+}
+
+func (SpotMarketOptions) SwaggerDoc() map[string]string {
+	return map_SpotMarketOptions
+}
+
+var map_TagSpecification = map[string]string{
+	"": "TagSpecification is the name/value pair for a tag",
+	"name": "Name of the tag",
+	"value": "Value of the tag",
+}
+
+func (TagSpecification) SwaggerDoc() map[string]string {
+	return map_TagSpecification
+}
+
+var map_AzureBootDiagnostics = map[string]string{
+	"": "AzureBootDiagnostics configures the boot diagnostics settings for the virtual machine. This allows you to configure capturing serial output from the virtual machine on boot. This is useful for debugging software based launch issues.",
+	"storageAccountType": "StorageAccountType determines if the storage account for storing the diagnostics data should be provisioned by Azure (AzureManaged) or by the customer (CustomerManaged).",
+	"customerManaged": "CustomerManaged provides reference to the customer managed storage account.",
+}
+
+func (AzureBootDiagnostics) SwaggerDoc() map[string]string {
+	return map_AzureBootDiagnostics
+}
+
+var map_AzureCustomerManagedBootDiagnostics = map[string]string{
+	"": "AzureCustomerManagedBootDiagnostics provides reference to a customer managed storage account.",
+	"storageAccountURI": "StorageAccountURI is the URI of the customer managed storage account. The URI typically will be `https://.blob.core.windows.net/` but may differ if you are using Azure DNS zone endpoints. You can find the correct endpoint by looking for the Blob Primary Endpoint in the endpoints tab in the Azure console.",
+}
+
+func (AzureCustomerManagedBootDiagnostics) SwaggerDoc() map[string]string {
+	return map_AzureCustomerManagedBootDiagnostics
+}
+
+var map_AzureDiagnostics = map[string]string{
+	"": "AzureDiagnostics is used to configure the diagnostic settings of the virtual machine.",
+	"boot": "AzureBootDiagnostics configures the boot diagnostics settings for the virtual machine. This allows you to configure capturing serial output from the virtual machine on boot. This is useful for debugging software based launch issues.",
+}
+
+func (AzureDiagnostics) SwaggerDoc() map[string]string {
+	return map_AzureDiagnostics
+}
+
+var map_AzureMachineProviderSpec = map[string]string{
+	"": "AzureMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field for an Azure virtual machine. It is used by the Azure machine actuator to create a single Machine. Required parameters such as location that are not specified by this configuration, will be defaulted by the actuator. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).",
+	"userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance",
+	"credentialsSecret": "CredentialsSecret is a reference to the secret with Azure credentials.",
+	"location": "Location is the region to use to create the instance",
+	"vmSize": "VMSize is the size of the VM to create.",
+	"image": "Image is the OS image to use to create the instance.",
+	"osDisk": "OSDisk represents the parameters for creating the OS disk.",
+	"dataDisks": "DataDisk specifies the parameters that are used to add one or more data disks to the machine.",
+	"sshPublicKey": "SSHPublicKey is the public key to use to SSH to the virtual machine.",
+	"publicIP": "PublicIP, if true, means a public IP will be used",
+	"tags": "Tags is a list of tags to apply to the machine.",
+	"securityGroup": "Network Security Group that needs to be attached to the machine's interface. No security group will be attached if empty.",
+	"applicationSecurityGroups": "Application Security Groups that need to be attached to the machine's interface. No application security groups will be attached if zero-length.",
+	"subnet": "Subnet to use for this instance",
+	"publicLoadBalancer": "PublicLoadBalancer to use for this instance",
+	"internalLoadBalancer": "InternalLoadBalancerName to use for this instance",
+	"natRule": "NatRule to set inbound NAT rule of the load balancer",
+	"managedIdentity": "ManagedIdentity to set managed identity name",
+	"vnet": "Vnet to set virtual network name",
+	"zone": "Availability Zone for the virtual machine. If nil, the virtual machine should be deployed to no zone",
+	"networkResourceGroup": "NetworkResourceGroup is the resource group for the virtual machine's network",
+	"resourceGroup": "ResourceGroup is the resource group for the virtual machine",
+	"spotVMOptions": "SpotVMOptions allows the ability to specify the Machine should use a Spot VM",
+	"securityProfile": "SecurityProfile specifies the Security profile settings for a virtual machine.",
+	"ultraSSDCapability": "UltraSSDCapability enables or disables Azure UltraSSD capability for a virtual machine. This can be used to allow/disallow binding of Azure UltraSSD to the Machine both as Data Disks or via Persistent Volumes. This Azure feature is subject to a specific scope and certain limitations. More information on this can be found in the official Azure documentation for Ultra Disks: (https://docs.microsoft.com/en-us/azure/virtual-machines/disks-enable-ultra-ssd?tabs=azure-portal#ga-scope-and-limitations).\n\nWhen omitted, if at least one Data Disk of type UltraSSD is specified, the platform will automatically enable the capability. If a Persistent Volume backed by an UltraSSD is bound to a Pod on the Machine, when this field is omitted, the platform will *not* automatically enable the capability (unless already enabled by the presence of an UltraSSD as Data Disk). This may manifest in the Pod being stuck in `ContainerCreating` phase. This defaulting behaviour may be subject to change in future.\n\nWhen set to \"Enabled\", if the capability is available for the Machine based on the scope and limitations described above, the capability will be set on the Machine. This will thus allow UltraSSD both as Data Disks and Persistent Volumes. If set to \"Enabled\" when the capability can't be available due to scope and limitations, the Machine will go into \"Failed\" state.\n\nWhen set to \"Disabled\", UltraSSDs will not be allowed either as Data Disks nor as Persistent Volumes. In this case if any UltraSSDs are specified as Data Disks on a Machine, the Machine will go into a \"Failed\" state. If instead any UltraSSDs are backing the volumes (via Persistent Volumes) of any Pods scheduled on a Node which is backed by the Machine, the Pod may get stuck in `ContainerCreating` phase.",
+	"acceleratedNetworking": "AcceleratedNetworking enables or disables Azure accelerated networking feature. Set to false by default. If true, then this will depend on whether the requested VMSize is supported. If set to true with an unsupported VMSize, Azure will return an error.",
+	"availabilitySet": "AvailabilitySet specifies the availability set to use for this instance. Availability set should be precreated before using this field.",
+	"diagnostics": "Diagnostics configures the diagnostics settings for the virtual machine. This allows you to configure boot diagnostics such as capturing serial output from the virtual machine on boot. This is useful for debugging software based launch issues.",
+}
+
+func (AzureMachineProviderSpec) SwaggerDoc() map[string]string {
+	return map_AzureMachineProviderSpec
+}
+
+var map_AzureMachineProviderStatus = map[string]string{
+	"": "AzureMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains Azure-specific status information. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).",
+	"vmId": "VMID is the ID of the virtual machine created in Azure.",
+	"vmState": "VMState is the provisioning state of the Azure virtual machine.",
+	"conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status.",
+}
+
+func (AzureMachineProviderStatus) SwaggerDoc() map[string]string {
+	return map_AzureMachineProviderStatus
+}
+
+var map_ConfidentialVM = map[string]string{
+	"": "ConfidentialVM defines the UEFI settings for the virtual machine.",
+	"uefiSettings": "uefiSettings specifies the security settings like secure boot and vTPM used while creating the virtual machine.",
+}
+
+func (ConfidentialVM) SwaggerDoc() map[string]string {
+	return map_ConfidentialVM
+}
+
+var map_DataDisk = map[string]string{
+	"": "DataDisk specifies the parameters that are used to add one or more data disks to the machine. A Data Disk is a managed disk that's attached to a virtual machine to store application data. It differs from an OS Disk as it doesn't come with a pre-installed OS, and it cannot contain the boot volume. It is registered as a SCSI drive and labeled with the chosen `lun`. e.g. for `lun: 0` the raw disk device will be available at `/dev/disk/azure/scsi1/lun0`.\n\nAs the Data Disk device is attached raw to the virtual machine, it will need to be partitioned, formatted with a filesystem and mounted, in order for it to be usable. This can be done by creating a custom userdata Secret with custom Ignition configuration to achieve the desired initialization. At this stage the previously defined `lun` is to be used as the \"device\" key for referencing the raw disk device to be initialized. Once the custom userdata Secret has been created, it can be referenced in the Machine's `.providerSpec.userDataSecret`. For further guidance and examples, please refer to the official OpenShift docs.",
+	"nameSuffix": "NameSuffix is the suffix to be appended to the machine name to generate the disk name. Each disk name will be in format _. NameSuffix name must start and finish with an alphanumeric character and can only contain letters, numbers, underscores, periods or hyphens. The overall disk name must not exceed 80 chars in length.",
+	"diskSizeGB": "DiskSizeGB is the size in GB to assign to the data disk.",
+	"managedDisk": "ManagedDisk specifies the Managed Disk parameters for the data disk. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is a ManagedDisk with storageAccountType: \"Premium_LRS\" and diskEncryptionSet.id: \"Default\".",
+	"lun": "Lun specifies the logical unit number of the data disk. This value is used to identify data disks within the VM and therefore must be unique for each data disk attached to a VM. This value is also needed for referencing the data disks devices within userdata to perform disk initialization through Ignition (e.g. partition/format/mount). The value must be between 0 and 63.",
+	"cachingType": "CachingType specifies the caching requirements. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is CachingTypeNone.",
+	"deletionPolicy": "DeletionPolicy specifies the data disk deletion policy upon Machine deletion. Possible values are \"Delete\",\"Detach\". When \"Delete\" is used the data disk is deleted when the Machine is deleted. When \"Detach\" is used the data disk is detached from the Machine and retained when the Machine is deleted.",
+}
+
+func (DataDisk) SwaggerDoc() map[string]string {
+	return map_DataDisk
+}
+
+var map_DataDiskManagedDiskParameters = map[string]string{
+	"": "DataDiskManagedDiskParameters is the parameters of a DataDisk managed disk.",
+	"storageAccountType": "StorageAccountType is the storage account type to use. Possible values include \"Standard_LRS\", \"Premium_LRS\" and \"UltraSSD_LRS\".",
+	"diskEncryptionSet": "DiskEncryptionSet is the disk encryption set properties. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is a DiskEncryptionSet with id: \"Default\".",
+}
+
+func (DataDiskManagedDiskParameters) SwaggerDoc() map[string]string {
+	return map_DataDiskManagedDiskParameters
+}
+
+var map_DiskEncryptionSetParameters = map[string]string{
+	"": "DiskEncryptionSetParameters is the disk encryption set properties",
+	"id": "ID is the disk encryption set ID Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is: \"Default\".",
+}
+
+func (DiskEncryptionSetParameters) SwaggerDoc() map[string]string {
+	return map_DiskEncryptionSetParameters
+}
+
+var map_DiskSettings = map[string]string{
+	"": "DiskSettings describe ephemeral disk settings for the os disk.",
+	"ephemeralStorageLocation": "EphemeralStorageLocation enables ephemeral OS when set to 'Local'. Possible values include: 'Local'. See https://docs.microsoft.com/en-us/azure/virtual-machines/ephemeral-os-disks for full details. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is that disks are saved to remote Azure storage.",
+}
+
+func (DiskSettings) SwaggerDoc() map[string]string {
+	return map_DiskSettings
+}
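Editorial note: the DataDisk documentation above spells out the lun-to-device mapping (`lun: 0` surfaces at `/dev/disk/azure/scsi1/lun0`) and the valid lun range of 0-63. A small sketch encoding just those two quoted facts; azureDataDiskDevice is a hypothetical helper, not part of the API:

package main

import "fmt"

// azureDataDiskDevice returns the raw device path for a data disk attached at
// the given lun, following the layout quoted in the DataDisk documentation.
func azureDataDiskDevice(lun int) (string, error) {
	if lun < 0 || lun > 63 {
		return "", fmt.Errorf("lun %d out of range 0-63", lun)
	}
	return fmt.Sprintf("/dev/disk/azure/scsi1/lun%d", lun), nil
}

func main() {
	p, _ := azureDataDiskDevice(0)
	fmt.Println(p) // /dev/disk/azure/scsi1/lun0
}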
+
+var map_Image = map[string]string{
+	"": "Image is a mirror of azure sdk compute.ImageReference",
+	"publisher": "Publisher is the name of the organization that created the image",
+	"offer": "Offer specifies the name of a group of related images created by the publisher. For example, UbuntuServer, WindowsServer",
+	"sku": "SKU specifies an instance of an offer, such as a major release of a distribution. For example, 18.04-LTS, 2019-Datacenter",
+	"version": "Version specifies the version of an image sku. The allowed formats are Major.Minor.Build or 'latest'. Major, Minor, and Build are decimal numbers. Specify 'latest' to use the latest version of an image available at deploy time. Even if you use 'latest', the VM image will not automatically update after deploy time even if a new version becomes available.",
+	"resourceID": "ResourceID specifies an image to use by ID",
+	"type": "Type identifies the source of the image and related information, such as purchase plans. Valid values are \"ID\", \"MarketplaceWithPlan\", \"MarketplaceNoPlan\", and omitted, which means no opinion and the platform chooses a good default which may change over time. Currently that default is \"MarketplaceNoPlan\" if publisher data is supplied, or \"ID\" if not. For more information about purchase plans, see: https://docs.microsoft.com/en-us/azure/virtual-machines/linux/cli-ps-findimage#check-the-purchase-plan-information",
+}
+
+func (Image) SwaggerDoc() map[string]string {
+	return map_Image
+}
+
+var map_OSDisk = map[string]string{
+	"osType": "OSType is the operating system type of the OS disk. Possible values include \"Linux\" and \"Windows\".",
+	"managedDisk": "ManagedDisk specifies the Managed Disk parameters for the OS disk.",
+	"diskSizeGB": "DiskSizeGB is the size in GB to assign to the data disk.",
+	"diskSettings": "DiskSettings describe ephemeral disk settings for the os disk.",
+	"cachingType": "CachingType specifies the caching requirements. Possible values include: 'None', 'ReadOnly', 'ReadWrite'. Empty value means no opinion and the platform chooses a default, which is subject to change over time. Currently the default is `None`.",
+}
+
+func (OSDisk) SwaggerDoc() map[string]string {
+	return map_OSDisk
+}
+
+var map_OSDiskManagedDiskParameters = map[string]string{
+	"": "OSDiskManagedDiskParameters is the parameters of an OSDisk managed disk.",
+	"storageAccountType": "StorageAccountType is the storage account type to use. Possible values include \"Standard_LRS\", \"Premium_LRS\".",
+	"diskEncryptionSet": "DiskEncryptionSet is the disk encryption set properties",
+	"securityProfile": "securityProfile specifies the security profile for the managed disk.",
+}
+
+func (OSDiskManagedDiskParameters) SwaggerDoc() map[string]string {
+	return map_OSDiskManagedDiskParameters
+}
+
+var map_SecurityProfile = map[string]string{
+	"": "SecurityProfile specifies the Security profile settings for a virtual machine or virtual machine scale set.",
+	"encryptionAtHost": "encryptionAtHost indicates whether Host Encryption should be enabled or disabled for a virtual machine or virtual machine scale set. This should be disabled when SecurityEncryptionType is set to DiskWithVMGuestState. Default is disabled.",
+	"settings": "settings specify the security type and the UEFI settings of the virtual machine. This field can be set for Confidential VMs and Trusted Launch for VMs.",
+}
+
+func (SecurityProfile) SwaggerDoc() map[string]string {
+	return map_SecurityProfile
+}
+
+var map_SecuritySettings = map[string]string{
+	"": "SecuritySettings define the security type and the UEFI settings of the virtual machine.",
+	"securityType": "securityType specifies the SecurityType of the virtual machine. It has to be set to any specified value to enable UEFISettings. The default behavior is: UEFISettings will not be enabled unless this property is set.",
+	"confidentialVM": "confidentialVM specifies the security configuration of the virtual machine. For more information regarding Confidential VMs, please refer to: https://learn.microsoft.com/azure/confidential-computing/confidential-vm-overview",
+	"trustedLaunch": "trustedLaunch specifies the security configuration of the virtual machine. For more information regarding TrustedLaunch for VMs, please refer to: https://learn.microsoft.com/azure/virtual-machines/trusted-launch",
+}
+
+func (SecuritySettings) SwaggerDoc() map[string]string {
+	return map_SecuritySettings
+}
+
+var map_SpotVMOptions = map[string]string{
+	"": "SpotVMOptions defines the options relevant to running the Machine on Spot VMs",
+	"maxPrice": "MaxPrice defines the maximum price the user is willing to pay for Spot VM instances",
+}
+
+func (SpotVMOptions) SwaggerDoc() map[string]string {
+	return map_SpotVMOptions
+}
+
+var map_TrustedLaunch = map[string]string{
+	"": "TrustedLaunch defines the UEFI settings for the virtual machine.",
+	"uefiSettings": "uefiSettings specifies the security settings like secure boot and vTPM used while creating the virtual machine.",
+}
+
+func (TrustedLaunch) SwaggerDoc() map[string]string {
+	return map_TrustedLaunch
+}
+
+var map_UEFISettings = map[string]string{
+	"": "UEFISettings specifies the security settings like secure boot and vTPM used while creating the virtual machine.",
+	"secureBoot": "secureBoot specifies whether secure boot should be enabled on the virtual machine. Secure Boot verifies the digital signature of all boot components and halts the boot process if signature verification fails. If omitted, the platform chooses a default, which is subject to change over time, currently that default is disabled.",
+	"virtualizedTrustedPlatformModule": "virtualizedTrustedPlatformModule specifies whether vTPM should be enabled on the virtual machine. When enabled the virtualized trusted platform module measurements are used to create a known good boot integrity policy baseline. The integrity policy baseline is used for comparison with measurements from subsequent VM boots to determine if anything has changed. This is required to be enabled if SecurityEncryptionType is defined. If omitted, the platform chooses a default, which is subject to change over time, currently that default is disabled.",
+}
+
+func (UEFISettings) SwaggerDoc() map[string]string {
+	return map_UEFISettings
+}
For further details on Azure Confidential VMs, please refer to the respective documentation: https://learn.microsoft.com/azure/confidential-computing/confidential-vm-overview", +} + +func (VMDiskSecurityProfile) SwaggerDoc() map[string]string { + return map_VMDiskSecurityProfile +} + +var map_GCPDisk = map[string]string{ + "": "GCPDisk describes disks for GCP.", + "autoDelete": "AutoDelete indicates if the disk will be auto-deleted when the instance is deleted (default false).", + "boot": "Boot indicates if this is a boot disk (default false).", + "sizeGb": "SizeGB is the size of the disk (in GB).", + "type": "Type is the type of the disk (eg: pd-standard).", + "image": "Image is the source image to create this disk.", + "labels": "Labels list of labels to apply to the disk.", + "encryptionKey": "EncryptionKey is the customer-supplied encryption key of the disk.", +} + +func (GCPDisk) SwaggerDoc() map[string]string { + return map_GCPDisk +} + +var map_GCPEncryptionKeyReference = map[string]string{ + "": "GCPEncryptionKeyReference describes the encryptionKey to use for a disk's encryption.", + "kmsKey": "KMSKeyName is the reference KMS key, in the format", + "kmsKeyServiceAccount": "KMSKeyServiceAccount is the service account being used for the encryption request for the given KMS key. If absent, the Compute Engine default service account is used. See https://cloud.google.com/compute/docs/access/service-accounts#compute_engine_service_account for details on the default service account.", +} + +func (GCPEncryptionKeyReference) SwaggerDoc() map[string]string { + return map_GCPEncryptionKeyReference +} + +var map_GCPGPUConfig = map[string]string{ + "": "GCPGPUConfig describes type and count of GPUs attached to the instance on GCP.", + "count": "Count is the number of GPUs to be attached to an instance.", + "type": "Type is the type of GPU to be attached to an instance. Supported GPU types are: nvidia-tesla-k80, nvidia-tesla-p100, nvidia-tesla-v100, nvidia-tesla-p4, nvidia-tesla-t4", +} + +func (GCPGPUConfig) SwaggerDoc() map[string]string { + return map_GCPGPUConfig +} + +var map_GCPKMSKeyReference = map[string]string{ + "": "GCPKMSKeyReference gathers required fields for looking up a GCP KMS Key.", + "name": "Name is the name of the customer managed encryption key to be used for the disk encryption.", + "keyRing": "KeyRing is the name of the KMS Key Ring which the KMS Key belongs to.", + "projectID": "ProjectID is the ID of the Project in which the KMS Key Ring exists. Defaults to the VM ProjectID if not set.", + "location": "Location is the GCP location in which the Key Ring exists.", +} + +func (GCPKMSKeyReference) SwaggerDoc() map[string]string { + return map_GCPKMSKeyReference +} + +var map_GCPMachineProviderSpec = map[string]string{ + "": "GCPMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field for a GCP virtual machine. It is used by the GCP machine actuator to create a single Machine. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", + "credentialsSecret": "CredentialsSecret is a reference to the secret with GCP credentials.", + "canIPForward": "CanIPForward Allows this instance to send and receive packets with non-matching destination or source IPs. This is required if you plan to use this instance to forward routes.", + "deletionProtection": "DeletionProtection whether the resource should be protected against deletion.", + "disks": "Disks is a list of disks to be attached to the VM.", + "labels": "Labels list of labels to apply to the VM.", + "gcpMetadata": "Metadata key/value pairs to apply to the VM.", + "networkInterfaces": "NetworkInterfaces is a list of network interfaces to be attached to the VM.", + "serviceAccounts": "ServiceAccounts is a list of GCP service accounts to be used by the VM.", + "tags": "Tags list of tags to apply to the VM.", + "targetPools": "TargetPools are used for network TCP/UDP load balancing. A target pool references member instances, an associated legacy HttpHealthCheck resource, and, optionally, a backup target pool.", + "machineType": "MachineType is the machine type to use for the VM.", + "region": "Region is the region in which the GCP machine provider will create the VM.", + "zone": "Zone is the zone in which the GCP machine provider will create the VM.", + "projectID": "ProjectID is the project in which the GCP machine provider will create the VM.", + "gpus": "GPUs is a list of GPUs to be attached to the VM.", + "preemptible": "Preemptible indicates if the created instance is preemptible.", + "onHostMaintenance": "OnHostMaintenance determines the behavior when a maintenance event occurs that might cause the instance to reboot. This is required to be set to \"Terminate\" if you want to provision a machine with attached GPUs. Otherwise, allowed values are \"Migrate\" and \"Terminate\". If omitted, the platform chooses a default, which is subject to change over time, currently that default is \"Migrate\".", + "restartPolicy": "RestartPolicy determines the behavior when an instance crashes or the underlying infrastructure provider stops the instance as part of a maintenance event (default \"Always\"). Cannot be \"Always\" with preemptible instances. Otherwise, allowed values are \"Always\" and \"Never\". If omitted, the platform chooses a default, which is subject to change over time, currently that default is \"Always\". RestartPolicy represents AutomaticRestart in GCP compute api", + "shieldedInstanceConfig": "ShieldedInstanceConfig is the Shielded VM configuration for the VM", + "confidentialCompute": "confidentialCompute Defines whether the instance should have confidential compute enabled. If enabled OnHostMaintenance is required to be set to \"Terminate\". If omitted, the platform chooses a default, which is subject to change over time, currently that default is false.", +} + +func (GCPMachineProviderSpec) SwaggerDoc() map[string]string { + return map_GCPMachineProviderSpec +} + +var map_GCPMachineProviderStatus = map[string]string{ + "": "GCPMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains GCP-specific status information.
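+// A minimal sketch (values hypothetical) of the GPU-related fields documented
+// above; per the onHostMaintenance doc, attached GPUs require "Terminate":
+//
+//	gpus:
+//	- type: nvidia-tesla-t4
+//	  count: 1
+//	onHostMaintenance: Terminate
+//	restartPolicy: Always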
Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "instanceId": "InstanceID is the ID of the instance in GCP", + "instanceState": "InstanceState is the provisioning state of the GCP Instance.", + "conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status", +} + +func (GCPMachineProviderStatus) SwaggerDoc() map[string]string { + return map_GCPMachineProviderStatus +} + +var map_GCPMetadata = map[string]string{ + "": "GCPMetadata describes metadata for GCP.", + "key": "Key is the metadata key.", + "value": "Value is the metadata value.", +} + +func (GCPMetadata) SwaggerDoc() map[string]string { + return map_GCPMetadata +} + +var map_GCPNetworkInterface = map[string]string{ + "": "GCPNetworkInterface describes network interfaces for GCP.", + "publicIP": "PublicIP indicates, if true, that a public IP will be used.", + "network": "Network is the network name.", + "projectID": "ProjectID is the project in which the GCP machine provider will create the VM.", + "subnetwork": "Subnetwork is the subnetwork name.", +} + +func (GCPNetworkInterface) SwaggerDoc() map[string]string { + return map_GCPNetworkInterface +} + +var map_GCPServiceAccount = map[string]string{ + "": "GCPServiceAccount describes service accounts for GCP.", + "email": "Email is the service account email.", + "scopes": "Scopes list of scopes to be assigned to the service account.", +} + +func (GCPServiceAccount) SwaggerDoc() map[string]string { + return map_GCPServiceAccount +} + +var map_GCPShieldedInstanceConfig = map[string]string{ + "": "GCPShieldedInstanceConfig describes the shielded VM configuration of the instance on GCP. Shielded VM configuration allows users to enable and disable Secure Boot, vTPM, and Integrity Monitoring.", + "secureBoot": "SecureBoot Defines whether the instance should have secure boot enabled. Secure Boot verifies the digital signature of all boot components, and halts the boot process if signature verification fails. If omitted, the platform chooses a default, which is subject to change over time, currently that default is Disabled.", + "virtualizedTrustedPlatformModule": "VirtualizedTrustedPlatformModule enables virtualized trusted platform module measurements to create a known good boot integrity policy baseline. The integrity policy baseline is used for comparison with measurements from subsequent VM boots to determine if anything has changed. This is required to be set to \"Enabled\" if IntegrityMonitoring is enabled. If omitted, the platform chooses a default, which is subject to change over time, currently that default is Enabled.", + "integrityMonitoring": "IntegrityMonitoring determines whether the instance should have integrity monitoring that verifies the runtime boot integrity. It compares the most recent boot measurements to the integrity policy baseline and returns a pair of pass/fail results depending on whether they match or not.
If omitted, the platform chooses a default, which is subject to change over time, currently that default is Enabled.", +} + +func (GCPShieldedInstanceConfig) SwaggerDoc() map[string]string { + return map_GCPShieldedInstanceConfig +} + +var map_LastOperation = map[string]string{ + "": "LastOperation represents the detail of the last performed operation on the MachineObject.", + "description": "Description is the human-readable description of the last operation.", + "lastUpdated": "LastUpdated is the timestamp at which LastOperation API was last-updated.", + "state": "State is the current status of the last performed operation. E.g. Processing, Failed, Successful, etc.", + "type": "Type is the type of operation which was last performed. E.g. Create, Delete, Update, etc.", +} + +func (LastOperation) SwaggerDoc() map[string]string { + return map_LastOperation +} + +var map_LifecycleHook = map[string]string{ + "": "LifecycleHook represents a single instance of a lifecycle hook", + "name": "Name defines a unique name for the lifecycle hook. The name should be unique and descriptive, ideally 1-3 words, in CamelCase or it may be namespaced, e.g. foo.example.com/CamelCase. Names must be unique and should only be managed by a single entity.", + "owner": "Owner defines the owner of the lifecycle hook. This should be descriptive enough so that users can identify who/what is responsible for blocking the lifecycle. This could be the name of a controller (e.g. clusteroperator/etcd) or an administrator managing the hook.", +} + +func (LifecycleHook) SwaggerDoc() map[string]string { + return map_LifecycleHook +} + +var map_LifecycleHooks = map[string]string{ + "": "LifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle.", + "preDrain": "PreDrain hooks prevent the machine from being drained. This also blocks further lifecycle events, such as termination.", + "preTerminate": "PreTerminate hooks prevent the machine from being terminated. PreTerminate hooks are actioned after the Machine has been drained.", +} + +func (LifecycleHooks) SwaggerDoc() map[string]string { + return map_LifecycleHooks +} + +var map_Machine = map[string]string{ + "": "Machine is the Schema for the machines API Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (Machine) SwaggerDoc() map[string]string { + return map_Machine +} + +var map_MachineList = map[string]string{ + "": "MachineList contains a list of Machine Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (MachineList) SwaggerDoc() map[string]string { + return map_MachineList +} + +var map_MachineSpec = map[string]string{ + "": "MachineSpec defines the desired state of Machine", + "metadata": "ObjectMeta will autopopulate the Node created.
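+// A minimal sketch (names hypothetical, the etcd owner example is taken from
+// the LifecycleHook docs above) of hooks as they might appear in a Machine spec:
+//
+//	lifecycleHooks:
+//	  preDrain:
+//	  - name: EtcdQuorumCheck
+//	    owner: clusteroperator/etcd
+//	  preTerminate:
+//	  - name: CleanupSpecialVolumes
+//	    owner: storage-admin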
Use this to indicate what labels, annotations, name prefix, etc., should be used when creating the Node.", + "lifecycleHooks": "LifecycleHooks allow users to pause operations on the machine at certain predefined points within the machine lifecycle.", + "taints": "The list of the taints to be applied to the corresponding Node in additive manner. This list will not overwrite any other taints added to the Node on an ongoing basis by other entities. These taints should be actively reconciled (e.g. if you ask the machine controller to apply a taint and then manually remove the taint, the machine controller will put it back), but the machine controller will not remove any taints", + "providerSpec": "ProviderSpec details Provider-specific configuration to use during node creation.", + "providerID": "ProviderID is the identification ID of the machine provided by the provider. This field must match the provider ID as seen on the node object corresponding to this machine. This field is required by higher level consumers of cluster-api. Example use case is cluster autoscaler with cluster-api as provider. Clean-up logic in the autoscaler compares machines to nodes to find out machines at provider which could not get registered as Kubernetes nodes. With cluster-api as a generic out-of-tree provider for autoscaler, this field is required by autoscaler to be able to have a provider view of the list of machines. Another list of nodes is queried from the k8s apiserver and then a comparison is done to find out unregistered machines, which are then marked for deletion. This field will be set by the actuators and consumed by higher level entities like autoscaler that will be interfacing with cluster-api as generic provider.", +} + +func (MachineSpec) SwaggerDoc() map[string]string { + return map_MachineSpec +} + +var map_MachineStatus = map[string]string{ + "": "MachineStatus defines the observed state of Machine", + "nodeRef": "NodeRef will point to the corresponding Node if it exists.", + "lastUpdated": "LastUpdated identifies when this status was last observed.", + "errorReason": "ErrorReason will be set in the event that there is a terminal problem reconciling the Machine and will contain a succinct value suitable for machine interpretation.\n\nThis field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output.", + "errorMessage": "ErrorMessage will be set in the event that there is a terminal problem reconciling the Machine and will contain a more verbose string suitable for logging and human consumption.\n\nThis field should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the Machine's spec or the configuration of the controller, and that manual intervention is required.
Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the controller, or the responsible controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the Machine object and/or logged in the controller's output.", + "providerStatus": "ProviderStatus details a Provider-specific status. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field.", + "addresses": "Addresses is a list of addresses assigned to the machine. Queried from cloud provider, if available.", + "lastOperation": "LastOperation describes the last-operation performed by the machine-controller. This API should be useful as a history in terms of the latest operation performed on the specific machine. It should also convey the state of the latest-operation, for example if it is still on-going, failed, or completed successfully.", + "phase": "Phase represents the current phase of machine actuation. One of: Failed, Provisioning, Provisioned, Running, Deleting", + "conditions": "Conditions defines the current state of the Machine", +} + +func (MachineStatus) SwaggerDoc() map[string]string { + return map_MachineStatus +} + +var map_MachineHealthCheck = map[string]string{ + "": "MachineHealthCheck is the Schema for the machinehealthchecks API Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of machine health check policy", + "status": "Most recently observed status of MachineHealthCheck resource", +} + +func (MachineHealthCheck) SwaggerDoc() map[string]string { + return map_MachineHealthCheck +} + +var map_MachineHealthCheckList = map[string]string{ + "": "MachineHealthCheckList contains a list of MachineHealthCheck Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (MachineHealthCheckList) SwaggerDoc() map[string]string { + return map_MachineHealthCheckList +} + +var map_MachineHealthCheckSpec = map[string]string{ + "": "MachineHealthCheckSpec defines the desired state of MachineHealthCheck", + "selector": "Label selector to match machines whose health will be exercised. Note: An empty selector will match all machines.", + "unhealthyConditions": "UnhealthyConditions contains a list of the conditions that determine whether a node is considered unhealthy. The conditions are combined in a logical OR, i.e. if any of the conditions is met, the node is unhealthy.", + "maxUnhealthy": "Any further remediation is only allowed if at most \"MaxUnhealthy\" machines selected by \"selector\" are not healthy. Expects either a positive integer value or a percentage value. Percentage values must be positive whole numbers and are capped at 100%. Both 0 and 0% are valid and will block all remediation.", + "nodeStartupTimeout": "Machines older than this duration without a node will be considered to have failed and will be remediated.
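+// A minimal sketch of a MachineHealthCheck built from the fields documented in
+// this map (apiVersion assumed from the vendored machine/v1beta1 package;
+// values hypothetical):
+//
+//	apiVersion: machine.openshift.io/v1beta1
+//	kind: MachineHealthCheck
+//	metadata:
+//	  name: example-mhc
+//	spec:
+//	  selector:
+//	    matchLabels:
+//	      machine.openshift.io/cluster-api-machineset: example
+//	  unhealthyConditions:
+//	  - type: Ready
+//	    status: "False"
+//	    timeout: 300s
+//	  maxUnhealthy: 40%
+//	  nodeStartupTimeout: 10m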
To prevent Machines without Nodes from being removed, disable startup checks by setting this value explicitly to \"0\". Expects an unsigned duration string of decimal numbers each with optional fraction and a unit suffix, eg \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\".", + "remediationTemplate": "RemediationTemplate is a reference to a remediation template provided by an infrastructure provider.\n\nThis field is completely optional; when filled, the MachineHealthCheck controller creates a new object from the template referenced and hands off remediation of the machine to a controller that lives outside of Machine API Operator.", +} + +func (MachineHealthCheckSpec) SwaggerDoc() map[string]string { + return map_MachineHealthCheckSpec +} + +var map_MachineHealthCheckStatus = map[string]string{ + "": "MachineHealthCheckStatus defines the observed state of MachineHealthCheck", + "expectedMachines": "total number of machines counted by this machine health check", + "currentHealthy": "total number of healthy machines counted by this machine health check", + "remediationsAllowed": "RemediationsAllowed is the number of further remediations allowed by this machine health check before maxUnhealthy short circuiting will be applied", + "conditions": "Conditions defines the current state of the MachineHealthCheck", +} + +func (MachineHealthCheckStatus) SwaggerDoc() map[string]string { + return map_MachineHealthCheckStatus +} + +var map_UnhealthyCondition = map[string]string{ + "": "UnhealthyCondition represents a Node condition type and value with a timeout specified as a duration. When the named condition has been in the given status for at least the timeout value, a node is considered unhealthy.", + "timeout": "Expects an unsigned duration string of decimal numbers each with optional fraction and a unit suffix, eg \"300ms\", \"1.5h\" or \"2h45m\". Valid time units are \"ns\", \"us\" (or \"µs\"), \"ms\", \"s\", \"m\", \"h\".", +} + +func (UnhealthyCondition) SwaggerDoc() map[string]string { + return map_UnhealthyCondition +} + +var map_MachineSet = map[string]string{ + "": "MachineSet ensures that a specified number of machine replicas are running at any given time. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (MachineSet) SwaggerDoc() map[string]string { + return map_MachineSet +} + +var map_MachineSetList = map[string]string{ + "": "MachineSetList contains a list of MachineSet Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (MachineSetList) SwaggerDoc() map[string]string { + return map_MachineSetList +} + +var map_MachineSetSpec = map[string]string{ + "": "MachineSetSpec defines the desired state of MachineSet", + "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1.", + "minReadySeconds": "MinReadySeconds is the minimum number of seconds for which a newly created machine should be ready.
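+// A minimal sketch (values hypothetical) of the MachineSetSpec fields
+// documented in this map; note the selector must match the template's labels:
+//
+//	spec:
+//	  replicas: 3
+//	  deletePolicy: Oldest
+//	  selector:
+//	    matchLabels:
+//	      machine.openshift.io/cluster-api-machineset: example
+//	  template:
+//	    metadata:
+//	      labels:
+//	        machine.openshift.io/cluster-api-machineset: example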
Defaults to 0 (machine will be considered available as soon as it is ready)", + "deletePolicy": "DeletePolicy defines the policy used to identify nodes to delete when downscaling. Defaults to \"Random\". Valid values are \"Random\", \"Newest\", \"Oldest\"", + "selector": "Selector is a label query over machines that should match the replica count. Label keys and values that must match in order to be controlled by this MachineSet. It must match the machine template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "Template is the object that describes the machine that will be created if insufficient replicas are detected.", +} + +func (MachineSetSpec) SwaggerDoc() map[string]string { + return map_MachineSetSpec +} + +var map_MachineSetStatus = map[string]string{ + "": "MachineSetStatus defines the observed state of MachineSet", + "replicas": "Replicas is the most recently observed number of replicas.", + "fullyLabeledReplicas": "The number of replicas that have labels matching the labels of the machine template of the MachineSet.", + "readyReplicas": "The number of ready replicas for this MachineSet. A machine is considered ready when the node has been created and is \"Ready\".", + "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this MachineSet.", + "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed MachineSet.", + "errorReason": "In the event that there is a terminal problem reconciling the replicas, both ErrorReason and ErrorMessage will be set. ErrorReason will be populated with a succinct value suitable for machine interpretation, while ErrorMessage will contain a more verbose string suitable for logging and human consumption.\n\nThese fields should not be set for transitive errors that a controller faces that are expected to be fixed automatically over time (like service outages), but instead indicate that something is fundamentally wrong with the MachineTemplate's spec or the configuration of the machine controller, and that manual intervention is required. Examples of terminal errors would be invalid combinations of settings in the spec, values that are unsupported by the machine controller, or the responsible machine controller itself being critically misconfigured.\n\nAny transient errors that occur during the reconciliation of Machines can be added as events to the MachineSet object and/or logged in the controller's output.", +} + +func (MachineSetStatus) SwaggerDoc() map[string]string { + return map_MachineSetStatus +} + +var map_MachineTemplateSpec = map[string]string{ + "": "MachineTemplateSpec describes the data needed to create a Machine from a template", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the machine. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (MachineTemplateSpec) SwaggerDoc() map[string]string { + return map_MachineTemplateSpec +} + +var map_Condition = map[string]string{ + "": "Condition defines an observation of a Machine API resource operational state.", + "type": "Type of condition in CamelCase or in foo.example.com/CamelCase.
Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important.", + "status": "Status of the condition, one of True, False, Unknown.", + "severity": "Severity provides an explicit classification of Reason code, so the users or machines can immediately understand the current situation and act accordingly. The Severity field MUST be set only when Status=False.", + "lastTransitionTime": "Last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.", + "reason": "The reason for the condition's last transition in CamelCase. The specific API may choose whether or not this field is considered a guaranteed API. This field may not be empty.", + "message": "A human readable message indicating details about the transition. This field may be empty.", +} + +func (Condition) SwaggerDoc() map[string]string { + return map_Condition +} + +var map_ObjectMeta = map[string]string{ + "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create. This is a copy of customizable fields from metav1.ObjectMeta.\n\nObjectMeta is embedded in `Machine.Spec`, `MachineDeployment.Template` and `MachineSet.Template`, which are not top-level Kubernetes objects. Given that metav1.ObjectMeta has lots of special cases and read-only fields which end up in the generated CRD validation, having it as a subset simplifies the API and avoids some issues that can impact user experience.\n\nDuring the [upgrade to controller-tools@v2](https://github.com/kubernetes-sigs/cluster-api/pull/1054) for v1alpha2, we noticed a failure would occur running Cluster API test suite against the new CRDs, specifically `spec.metadata.creationTimestamp in body must be of type string: \"null\"`. The investigation showed that `controller-tools@v2` behaves differently than its previous version when handling types from [metav1](k8s.io/apimachinery/pkg/apis/meta/v1) package.\n\nIn more detail, we found that embedded (non-top level) types that embedded `metav1.ObjectMeta` had validation properties, including for `creationTimestamp` (metav1.Time). The `metav1.Time` type specifies a custom json marshaller that, when IsZero() is true, returns `null` which breaks validation because the field isn't marked as nullable.\n\nIn future versions, controller-tools@v2 might allow overriding the type and validation for embedded types. When that happens, this hack should be revisited.", + "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix.
The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", + "namespace": "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", + "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", + "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", + "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", +} + +func (ObjectMeta) SwaggerDoc() map[string]string { + return map_ObjectMeta +} + +var map_ProviderSpec = map[string]string{ + "": "ProviderSpec defines the configuration to use during node creation.", + "value": "Value is an inlined, serialized representation of the resource configuration. It is recommended that providers maintain their own versioned API types that should be serialized/deserialized from this field, akin to component config.", +} + +func (ProviderSpec) SwaggerDoc() map[string]string { + return map_ProviderSpec +} + +var map_AddressesFromPool = map[string]string{ + "": "AddressesFromPool is an IPAddressPool that will be used to create IPAddressClaims for fulfillment by an external controller.", + "group": "group of the IP address pool type known to an external IPAM controller. This should be a fully qualified domain name, for example, externalipam.controller.io.", + "resource": "resource of the IP address pool type known to an external IPAM controller. It is normally the plural form of the resource kind in lowercase, for example, ippools.", + "name": "name of an IP address pool, for example, pool-config-1.", +} + +func (AddressesFromPool) SwaggerDoc() map[string]string { + return map_AddressesFromPool +} + +var map_NetworkDeviceSpec = map[string]string{ + "": "NetworkDeviceSpec defines the network configuration for a virtual machine's network device.", + "networkName": "networkName is the name of the vSphere network or port group to which the network device will be connected, for example, port-group-1. When not provided, the vCenter API will attempt to select a default network.
The available networks (port groups) can be listed using `govc ls 'network/*'`", + "gateway": "gateway is an IPv4 or IPv6 address which represents the subnet gateway, for example, 192.168.1.1.", + "ipAddrs": "ipAddrs is a list of one or more IPv4 and/or IPv6 addresses and CIDR to assign to this device, for example, 192.168.1.100/24. IP addresses provided via ipAddrs are intended to allow explicit assignment of a machine's IP address. IP pool configurations provided via addressesFromPool, however, defer IP address assignment to an external controller. If both addressesFromPool and ipAddrs are empty or not defined, DHCP will be used to assign an IP address. If both ipAddrs and addressesFromPools are defined, the IP addresses associated with ipAddrs will be applied first followed by IP addresses from addressesFromPools.", + "nameservers": "nameservers is a list of IPv4 and/or IPv6 addresses used as DNS nameservers, for example, 8.8.8.8. Note that a nameserver is not provided by a fulfilled IPAddressClaim; if DHCP is not the source of IP addresses for this network device, nameservers should include a valid nameserver.", + "addressesFromPools": "addressesFromPools is a list of references to IP pool types and instances which are handled by an external controller. addressesFromPool configurations provided via addressesFromPools defer IP address assignment to an external controller. IP addresses provided via ipAddrs, however, are intended to allow explicit assignment of a machine's IP address. If both addressesFromPool and ipAddrs are empty or not defined, DHCP will assign an IP address. If both ipAddrs and addressesFromPools are defined, the IP addresses associated with ipAddrs will be applied first followed by IP addresses from addressesFromPools.", +} + +func (NetworkDeviceSpec) SwaggerDoc() map[string]string { + return map_NetworkDeviceSpec +} + +var map_NetworkSpec = map[string]string{ + "": "NetworkSpec defines the virtual machine's network configuration.", + "devices": "Devices defines the virtual machine's network interfaces.", +} + +func (NetworkSpec) SwaggerDoc() map[string]string { + return map_NetworkSpec +} + +var map_VSphereMachineProviderSpec = map[string]string{ + "": "VSphereMachineProviderSpec is the type that will be embedded in a Machine.Spec.ProviderSpec field for a vSphere virtual machine. It is used by the vSphere machine actuator to create a single Machine. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "userDataSecret": "UserDataSecret contains a local reference to a secret that contains the UserData to apply to the instance", + "credentialsSecret": "CredentialsSecret is a reference to the secret with vSphere credentials.", + "template": "Template is the name, inventory path, or instance UUID of the template used to clone new machines.", + "workspace": "Workspace describes the workspace to use for the machine.", + "network": "Network is the network configuration for this machine's VM.", + "numCPUs": "NumCPUs is the number of virtual processors in a virtual machine. Defaults to the analogue property value in the template from which this machine is cloned.", + "numCoresPerSocket": "NumCoresPerSocket is the number of cores among which to distribute CPUs in this virtual machine. Defaults to the analogue property value in the template from which this machine is cloned.", + "memoryMiB": "MemoryMiB is the size of a virtual machine's memory, in MiB.
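+// A minimal sketch of the network device fields documented above (most example
+// values are taken from those docs; port-group-2 is hypothetical), one device
+// with static addressing and one deferring to an external IPAM pool:
+//
+//	network:
+//	  devices:
+//	  - networkName: port-group-1
+//	    ipAddrs:
+//	    - 192.168.1.100/24
+//	    gateway: 192.168.1.1
+//	    nameservers:
+//	    - 8.8.8.8
+//	  - networkName: port-group-2
+//	    addressesFromPools:
+//	    - group: externalipam.controller.io
+//	      resource: ippools
+//	      name: pool-config-1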
Defaults to the analogue property value in the template from which this machine is cloned.", + "diskGiB": "DiskGiB is the size of a virtual machine's disk, in GiB. Defaults to the analogue property value in the template from which this machine is cloned. This parameter will be ignored if 'LinkedClone' CloneMode is set.", + "snapshot": "Snapshot is the name of the snapshot from which the VM was cloned.", + "cloneMode": "CloneMode specifies the type of clone operation. The LinkedClone mode is only supported for templates that have at least one snapshot. If the template has no snapshots, then CloneMode defaults to FullClone. When LinkedClone mode is enabled the DiskGiB field is ignored as it is not possible to expand disks of linked clones. Defaults to FullClone. When using LinkedClone, if no snapshots exist for the source template, falls back to FullClone.", +} + +func (VSphereMachineProviderSpec) SwaggerDoc() map[string]string { + return map_VSphereMachineProviderSpec +} + +var map_VSphereMachineProviderStatus = map[string]string{ + "": "VSphereMachineProviderStatus is the type that will be embedded in a Machine.Status.ProviderStatus field. It contains VSphere-specific status information. Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "instanceId": "InstanceID is the ID of the instance in VSphere", + "instanceState": "InstanceState is the provisioning state of the VSphere Instance.", + "conditions": "Conditions is a set of conditions associated with the Machine to indicate errors or other status", + "taskRef": "TaskRef is a managed object reference to a Task related to the machine. This value is set automatically at runtime and should not be set or modified by users.", +} + +func (VSphereMachineProviderStatus) SwaggerDoc() map[string]string { + return map_VSphereMachineProviderStatus +} + +var map_Workspace = map[string]string{ + "": "Workspace defines a workspace configuration for the vSphere cloud provider.", + "server": "Server is the IP address or FQDN of the vSphere endpoint.", + "datacenter": "Datacenter is the datacenter in which VMs are created/located.", + "folder": "Folder is the folder in which VMs are created/located.", + "datastore": "Datastore is the datastore in which VMs are created/located.", + "resourcePool": "ResourcePool is the resource pool in which VMs are created/located.", +} + +func (Workspace) SwaggerDoc() map[string]string { + return map_Workspace +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/monitoring/.codegen.yaml b/vendor/github.com/openshift/api/monitoring/.codegen.yaml new file mode 100644 index 000000000..b865f353b --- /dev/null +++ b/vendor/github.com/openshift/api/monitoring/.codegen.yaml @@ -0,0 +1,8 @@ +schemapatch: + requiredFeatureSets: + - "" + - "Default" + - "TechPreviewNoUpgrade" +swaggerdocs: + disabled: false + commentPolicy: Enforce diff --git a/vendor/github.com/openshift/api/monitoring/install.go b/vendor/github.com/openshift/api/monitoring/install.go new file mode 100644 index 000000000..4a2539110 --- /dev/null +++ b/vendor/github.com/openshift/api/monitoring/install.go @@ -0,0 +1,26 @@ +package monitoring + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + monitoringv1alpha1 "github.com/openshift/api/monitoring/v1alpha1" +) + +const ( + GroupName = "monitoring.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(monitoringv1alpha1.Install) + // Install is a
function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/monitoring/v1alpha1/0000_50_monitoring_01_alertingrules.crd.yaml b/vendor/github.com/openshift/api/monitoring/v1alpha1/0000_50_monitoring_01_alertingrules.crd.yaml new file mode 100644 index 000000000..d3ecf3882 --- /dev/null +++ b/vendor/github.com/openshift/api/monitoring/v1alpha1/0000_50_monitoring_01_alertingrules.crd.yaml @@ -0,0 +1,122 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1179 + description: OpenShift Monitoring alerting rules + release.openshift.io/feature-set: TechPreviewNoUpgrade + name: alertingrules.monitoring.openshift.io +spec: + group: monitoring.openshift.io + names: + kind: AlertingRule + listKind: AlertingRuleList + plural: alertingrules + singular: alertingrule + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: "AlertingRule represents a set of user-defined Prometheus rule groups containing alerting rules. This resource is the supported method for cluster admins to create alerts based on metrics recorded by the platform monitoring stack in OpenShift, i.e. the Prometheus instance deployed to the openshift-monitoring namespace. You might use this to create custom alerting rules not shipped with OpenShift based on metrics from components such as the node_exporter, which provides machine-level metrics such as CPU usage, or kube-state-metrics, which provides metrics on Kubernetes usage. \n The API is mostly compatible with the upstream PrometheusRule type from the prometheus-operator. The primary difference being that recording rules are not allowed here -- only alerting rules. For each AlertingRule resource created, a corresponding PrometheusRule will be created in the openshift-monitoring namespace. OpenShift requires admins to use the AlertingRule resource rather than the upstream type in order to allow better OpenShift specific defaulting and validation, while not modifying the upstream APIs directly. \n You can find upstream API documentation for PrometheusRule resources here: \n https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md \n Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec describes the desired state of this AlertingRule object. + type: object + required: + - groups + properties: + groups: + description: "groups is a list of grouped alerting rules. Rule groups are the unit at which Prometheus parallelizes rule processing. All rules in a single group share a configured evaluation interval. All rules in the group will be processed together on this interval, sequentially, and all rules will be processed. \n It's common to group related alerting rules into a single AlertingRule resource, and within that resource, closely related alerts, or simply alerts with the same interval, into individual groups. You are also free to create AlertingRule resources with only a single rule group, but be aware that this can have a performance impact on Prometheus if the group is extremely large or has very complex query expressions to evaluate. Spreading very complex rules across multiple groups to allow them to be processed in parallel is also a common use-case." + type: array + minItems: 1 + items: + description: RuleGroup is a list of sequentially evaluated alerting rules. + type: object + required: + - name + - rules + properties: + interval: + description: "interval is how often rules in the group are evaluated. If not specified, it defaults to the global.evaluation_interval configured in Prometheus, which itself defaults to 30 seconds. You can check if this value has been modified from the default on your cluster by inspecting the platform Prometheus configuration: \n $ oc -n openshift-monitoring describe prometheus k8s \n The relevant field in that resource is: spec.evaluationInterval \n This is represented as a Prometheus duration, e.g. 1d, 1h30m, 5m, 10s. You can find the upstream documentation here: \n https://prometheus.io/docs/prometheus/latest/configuration/configuration/#duration" + type: string + pattern: ^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$ + name: + description: name is the name of the group. + type: string + rules: + description: rules is a list of sequentially evaluated alerting rules. Prometheus may process rule groups in parallel, but rules within a single group are always processed sequentially, and all rules are processed. + type: array + minItems: 1 + items: + description: 'Rule describes an alerting rule. See Prometheus documentation: - https://www.prometheus.io/docs/prometheus/latest/configuration/alerting_rules' + type: object + required: + - alert + - expr + properties: + alert: + description: alert is the name of the alert. Must be a valid label value, i.e. only contain ASCII letters, numbers, and underscores. + type: string + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + annotations: + description: "annotations to add to each alert. These are values that can be used to store longer additional information that you won't query on, such as alert descriptions or runbook links, e.g.: \n annotations: summary: HAProxy reload failure description: | This alert fires when HAProxy fails to reload its configuration, which will result in the router not picking up recently created or modified routes." + type: object + additionalProperties: + type: string + expr: + description: "expr is the PromQL expression to evaluate. Every evaluation cycle this is evaluated at the current time, and all resultant time series become pending or firing alerts.
This is most often a string representing a PromQL expression, e.g.: \n mapi_current_pending_csr > mapi_max_pending_csr \n In rare cases this could be a simple integer, e.g. a simple \"1\" if the intent is to create an alert that is always firing. This is sometimes used to create an always-firing \"Watchdog\" alert in order to ensure the alerting pipeline is functional." + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + for: + description: 'for is the time period after which alerts are considered firing after first returning results. Alerts which have not yet fired for long enough are considered pending. This is represented as a Prometheus duration, for details on the format see: - https://prometheus.io/docs/prometheus/latest/configuration/configuration/#duration' + type: string + pattern: ^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$ + labels: + description: "labels to add or overwrite for each alert. Evaluating the PromQL expression for the alert produces an existing set of labels for the alert; for any label specified here with the same name as a label in that set, the label specified here wins and overwrites the previous value. These should typically be short identifying values that may be useful to query against. A common example is the alert severity: \n labels: severity: warning" + type: object + additionalProperties: + type: string + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + status: + description: status describes the current state of this AlertingRule object. + type: object + properties: + observedGeneration: + description: observedGeneration is the last generation change you've dealt with. + type: integer + format: int64 + prometheusRule: + description: prometheusRule is the generated PrometheusRule for this AlertingRule. Each AlertingRule instance results in a generated PrometheusRule object in the same namespace, which is always the openshift-monitoring namespace. + type: object + required: + - name + properties: + name: + description: name of the referenced PrometheusRule. + type: string + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/api/monitoring/v1alpha1/0000_50_monitoring_02_alertrelabelconfigs.crd.yaml b/vendor/github.com/openshift/api/monitoring/v1alpha1/0000_50_monitoring_02_alertrelabelconfigs.crd.yaml new file mode 100644 index 000000000..677ed6cc9 --- /dev/null +++ b/vendor/github.com/openshift/api/monitoring/v1alpha1/0000_50_monitoring_02_alertrelabelconfigs.crd.yaml @@ -0,0 +1,140 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1179 + description: OpenShift Monitoring alert relabel configurations + release.openshift.io/feature-set: TechPreviewNoUpgrade + name: alertrelabelconfigs.monitoring.openshift.io +spec: + group: monitoring.openshift.io + names: + kind: AlertRelabelConfig + listKind: AlertRelabelConfigList + plural: alertrelabelconfigs + singular: alertrelabelconfig + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: "AlertRelabelConfig defines a set of relabel configs for alerts. \n Compatibility level 4: No compatibility is provided, the API can change at any point for any reason.
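+# A minimal sketch (values hypothetical) of an AlertRelabelConfig that drops a
+# label from all alerts, using the schema defined below:
+#
+#   apiVersion: monitoring.openshift.io/v1alpha1
+#   kind: AlertRelabelConfig
+#   metadata:
+#     name: example
+#     namespace: openshift-monitoring
+#   spec:
+#     configs:
+#     - action: LabelDrop
+#       regex: prometheus_replica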
These capabilities should not be used by applications needing long term support." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec describes the desired state of this AlertRelabelConfig object. + type: object + required: + - configs + properties: + configs: + description: configs is a list of sequentially evaluated alert relabel configs. + type: array + minItems: 1 + items: + description: 'RelabelConfig allows dynamic rewriting of label sets for alerts. See Prometheus documentation: - https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs - https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config' + type: object + properties: + action: + description: 'action to perform based on regex matching. Must be one of: Replace, Keep, Drop, HashMod, LabelMap, LabelDrop, or LabelKeep. Default is: ''Replace''' + type: string + default: Replace + enum: + - Replace + - Keep + - Drop + - HashMod + - LabelMap + - LabelDrop + - LabelKeep + modulus: + description: modulus to take of the hash of the source label values. This can be combined with the 'HashMod' action to set 'target_label' to the 'modulus' of a hash of the concatenated 'source_labels'. + type: integer + format: int64 + regex: + description: 'regex against which the extracted value is matched. Default is: ''(.*)''' + type: string + replacement: + description: 'replacement value against which a regex replace is performed if the regular expression matches. This is required if the action is ''Replace'' or ''LabelMap''. Regex capture groups are available. Default is: ''$1''' + type: string + separator: + description: separator placed between concatenated source label values. When omitted, Prometheus will use its default value of ';'. + type: string + sourceLabels: + description: sourceLabels select values from existing labels. Their content is concatenated using the configured separator and matched against the configured regular expression for the Replace, Keep, and Drop actions. + type: array + items: + description: LabelName is a valid Prometheus label name which may only contain ASCII letters, numbers, and underscores. + type: string + pattern: ^[a-zA-Z_][a-zA-Z0-9_]*$ + targetLabel: + description: targetLabel to which the resulting value is written in a 'Replace' action. It is mandatory for 'Replace' and 'HashMod' actions. Regex capture groups are available. + type: string + status: + description: status describes the current state of this AlertRelabelConfig object. + type: object + properties: + conditions: + description: conditions contains details on the state of the AlertRelabelConfig, may be empty. + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. 
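+# A hedged sketch of a Replace action using the fields above: the sourceLabels
+# values are joined with the separator (default ';'), matched against regex,
+# and replacement is written to targetLabel (values hypothetical):
+#
+#   configs:
+#   - action: Replace
+#     sourceLabels: [alertname, severity]
+#     regex: Watchdog;none
+#     targetLabel: severity
+#     replacement: info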
--- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/api/monitoring/v1alpha1/Makefile b/vendor/github.com/openshift/api/monitoring/v1alpha1/Makefile new file mode 100644 index 000000000..536d21926 --- /dev/null +++ b/vendor/github.com/openshift/api/monitoring/v1alpha1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="monitoring.openshift.io/v1alpha1" diff --git a/vendor/github.com/openshift/api/monitoring/v1alpha1/doc.go b/vendor/github.com/openshift/api/monitoring/v1alpha1/doc.go new file mode 100644 index 000000000..fde7f93d4 --- /dev/null +++ b/vendor/github.com/openshift/api/monitoring/v1alpha1/doc.go @@ -0,0 +1,6 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=monitoring.openshift.io +package v1alpha1 diff --git a/vendor/github.com/openshift/api/monitoring/v1alpha1/register.go b/vendor/github.com/openshift/api/monitoring/v1alpha1/register.go new file mode 100644 index 000000000..b0a28f995 --- /dev/null +++ b/vendor/github.com/openshift/api/monitoring/v1alpha1/register.go @@ -0,0 +1,41 @@ +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "monitoring.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. 
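+//
+// A minimal usage sketch (caller side, assuming a fresh runtime.Scheme) of the
+// Install variable defined above:
+//
+//	scheme := runtime.NewScheme()
+//	if err := Install(scheme); err != nil {
+//		panic(err)
+//	}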
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &AlertingRule{}, + &AlertingRuleList{}, + &AlertRelabelConfig{}, + &AlertRelabelConfigList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/monitoring/v1alpha1/techpreview.alertingrule.testsuite.yaml b/vendor/github.com/openshift/api/monitoring/v1alpha1/techpreview.alertingrule.testsuite.yaml new file mode 100644 index 000000000..ca4fcf383 --- /dev/null +++ b/vendor/github.com/openshift/api/monitoring/v1alpha1/techpreview.alertingrule.testsuite.yaml @@ -0,0 +1,24 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[TechPreview] AlertingRule" +crd: 0000_50_monitoring_01_alertingrules.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal AlertingRule + initial: | + apiVersion: monitoring.openshift.io/v1alpha1 + kind: AlertingRule + spec: + groups: + - name: foo + rules: + - alert: foo + expr: foo + expected: | + apiVersion: monitoring.openshift.io/v1alpha1 + kind: AlertingRule + spec: + groups: + - name: foo + rules: + - alert: foo + expr: foo diff --git a/vendor/github.com/openshift/api/monitoring/v1alpha1/techpreview.alertrelabelconfig.testsuite.yaml b/vendor/github.com/openshift/api/monitoring/v1alpha1/techpreview.alertrelabelconfig.testsuite.yaml new file mode 100644 index 000000000..c7978fd5f --- /dev/null +++ b/vendor/github.com/openshift/api/monitoring/v1alpha1/techpreview.alertrelabelconfig.testsuite.yaml @@ -0,0 +1,20 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[TechPreview] AlertRelabelConfig" +crd: 0000_50_monitoring_02_alertrelabelconfigs.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal AlertRelabelConfig + initial: | + apiVersion: monitoring.openshift.io/v1alpha1 + kind: AlertRelabelConfig + spec: + configs: + # At least one item is required but all fields are optional, + # so use action, as it is defaulted when not set. + - action: Replace + expected: | + apiVersion: monitoring.openshift.io/v1alpha1 + kind: AlertRelabelConfig + spec: + configs: + - action: Replace diff --git a/vendor/github.com/openshift/api/monitoring/v1alpha1/types.go b/vendor/github.com/openshift/api/monitoring/v1alpha1/types.go new file mode 100644 index 000000000..4a82de51e --- /dev/null +++ b/vendor/github.com/openshift/api/monitoring/v1alpha1/types.go @@ -0,0 +1,349 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// AlertingRule represents a set of user-defined Prometheus rule groups containing +// alerting rules. This resource is the supported method for cluster admins to +// create alerts based on metrics recorded by the platform monitoring stack in +// OpenShift, i.e. the Prometheus instance deployed to the openshift-monitoring +// namespace. You might use this to create custom alerting rules not shipped with +// OpenShift based on metrics from components such as the node_exporter, which +// provides machine-level metrics such as CPU usage, or kube-state-metrics, which +// provides metrics on Kubernetes usage. +// +// The API is mostly compatible with the upstream PrometheusRule type from the +// prometheus-operator. The primary difference is that recording rules are not +// allowed here -- only alerting rules.
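+//
+// For illustration only, a minimal AlertingRule manifest might look like the
+// following (the names, expression, and label value here are hypothetical,
+// chosen to mirror the shape exercised by the test suite above):
+//
+//   apiVersion: monitoring.openshift.io/v1alpha1
+//   kind: AlertingRule
+//   metadata:
+//     name: example
+//     namespace: openshift-monitoring
+//   spec:
+//     groups:
+//     - name: example-rules
+//       rules:
+//       - alert: ExampleAlwaysFiring
+//         expr: vector(1)
+//         for: 10m
+//         labels:
+//           severity: warning
+//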
For each AlertingRule resource created, a +// corresponding PrometheusRule will be created in the openshift-monitoring +// namespace. OpenShift requires admins to use the AlertingRule resource rather +// than the upstream type in order to allow better OpenShift specific defaulting +// and validation, while not modifying the upstream APIs directly. +// +// You can find upstream API documentation for PrometheusRule resources here: +// +// https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +genclient +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:subresource:status +type AlertingRule struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec describes the desired state of this AlertingRule object. + Spec AlertingRuleSpec `json:"spec"` + + // status describes the current state of this AlertingRule object. + // + // +optional + Status AlertingRuleStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AlertingRuleList is a list of AlertingRule objects. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +k8s:openapi-gen=true +type AlertingRuleList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty"` + + // items is a list of AlertingRule objects. + Items []AlertingRule `json:"items"` +} + +// AlertingRuleSpec is the desired state of an AlertingRule resource. +// +// +k8s:openapi-gen=true +type AlertingRuleSpec struct { + // groups is a list of grouped alerting rules. Rule groups are the unit at + // which Prometheus parallelizes rule processing. All rules in a single group + // share a configured evaluation interval. All rules in the group will be + // processed together on this interval, sequentially, and all rules will be + // processed. + // + // It's common to group related alerting rules into a single AlertingRule + // resource, and within that resource, closely related alerts, or simply + // alerts with the same interval, into individual groups. You are also free + // to create AlertingRule resources with only a single rule group, but be + // aware that this can have a performance impact on Prometheus if the group is + // extremely large or has very complex query expressions to evaluate. + // Spreading very complex rules across multiple groups to allow them to be + // processed in parallel is also a common use-case. + // + // +listType=map + // +listMapKey=name + // +kubebuilder:validation:MinItems:=1 + Groups []RuleGroup `json:"groups"` +} + +// RuleGroup is a list of sequentially evaluated alerting rules. +// +// +k8s:openapi-gen=true +type RuleGroup struct { + // name is the name of the group.
+ // + // +kubebuilder:validation:Required + Name string `json:"name"` + + // interval is how often rules in the group are evaluated. If not specified, + // it defaults to the global.evaluation_interval configured in Prometheus, + // which itself defaults to 30 seconds. You can check if this value has been + // modified from the default on your cluster by inspecting the platform + // Prometheus configuration: + // + // $ oc -n openshift-monitoring describe prometheus k8s + // + // The relevant field in that resource is: spec.evaluationInterval + // + // This is represented as a Prometheus duration, e.g. 1d, 1h30m, 5m, 10s. You + // can find the upstream documentation here: + // + // https://prometheus.io/docs/prometheus/latest/configuration/configuration/#duration + // + // +kubebuilder:validation:Pattern:="^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$" + // +optional + Interval string `json:"interval,omitempty"` + + // rules is a list of sequentially evaluated alerting rules. Prometheus may + // process rule groups in parallel, but rules within a single group are always + // processed sequentially, and all rules are processed. + // + // +kubebuilder:validation:MinItems:=1 + Rules []Rule `json:"rules"` +} + +// Rule describes an alerting rule. +// See Prometheus documentation: +// - https://www.prometheus.io/docs/prometheus/latest/configuration/alerting_rules +// +// +k8s:openapi-gen=true +type Rule struct { + // alert is the name of the alert. Must be a valid label value, i.e. only + // contain ASCII letters, numbers, and underscores. + // + // +kubebuilder:validation:Pattern:="^[a-zA-Z_][a-zA-Z0-9_]*$" + // +required + Alert string `json:"alert"` + + // expr is the PromQL expression to evaluate. Every evaluation cycle this is + // evaluated at the current time, and all resultant time series become pending + // or firing alerts. This is most often a string representing a PromQL + // expression, e.g.: + // + // mapi_current_pending_csr > mapi_max_pending_csr + // + // In rare cases this could be a simple integer, e.g. a simple "1" if the + // intent is to create an alert that is always firing. This is sometimes used + // to create an always-firing "Watchdog" alert in order to ensure the alerting + // pipeline is functional. + // + // +required + Expr intstr.IntOrString `json:"expr"` + + // for is the time period after which alerts are considered firing after first + // returning results. Alerts which have not yet fired for long enough are + // considered pending. This is represented as a Prometheus duration, for + // details on the format see: + // - https://prometheus.io/docs/prometheus/latest/configuration/configuration/#duration + // + // +kubebuilder:validation:Pattern:="^(([0-9]+)y)?(([0-9]+)w)?(([0-9]+)d)?(([0-9]+)h)?(([0-9]+)m)?(([0-9]+)s)?(([0-9]+)ms)?$" + // +optional + For string `json:"for,omitempty"` + + // labels to add or overwrite for each alert. Evaluating the PromQL + // expression for the alert produces a set of labels for the alert; for any + // label specified here with the same name as a label in that set, the label + // here wins and overwrites the previous value. These should typically be short identifying values + // that may be useful to query against. A common example is the alert + // severity: + // + // labels: + // severity: warning + // + // +optional + Labels map[string]string `json:"labels,omitempty"` + + // annotations to add to each alert.
These are values that can be used to + // store longer additional information that you won't query on, such as alert + // descriptions or runbook links, e.g.: + // + // annotations: + // summary: HAProxy reload failure + // description: | + // This alert fires when HAProxy fails to reload its + // configuration, which will result in the router not picking up + // recently created or modified routes. + // + // +optional + Annotations map[string]string `json:"annotations,omitempty"` +} + +// AlertingRuleStatus is the status of an AlertingRule resource. +type AlertingRuleStatus struct { + // observedGeneration is the last metadata.generation of this resource that + // has been observed and acted upon. + // + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // prometheusRule is the generated PrometheusRule for this AlertingRule. Each + // AlertingRule instance results in a generated PrometheusRule object in the + // same namespace, which is always the openshift-monitoring namespace. + // + // +optional + PrometheusRule PrometheusRuleRef `json:"prometheusRule,omitempty"` +} + +// PrometheusRuleRef is a reference to an existing PrometheusRule object. Each +// AlertingRule instance results in a generated PrometheusRule object in the same +// namespace, which is always the openshift-monitoring namespace. This is used to +// point to the generated PrometheusRule object in the AlertingRule status. +type PrometheusRuleRef struct { + // This is a struct so that we can support future expansion of fields within + // the reference should we ever need to. + + // name of the referenced PrometheusRule. + Name string `json:"name"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:subresource:status + +// AlertRelabelConfig defines a set of relabel configs for alerts. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +k8s:openapi-gen=true +type AlertRelabelConfig struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec describes the desired state of this AlertRelabelConfig object. + Spec AlertRelabelConfigSpec `json:"spec"` + + // status describes the current state of this AlertRelabelConfig object. + // + // +optional + Status AlertRelabelConfigStatus `json:"status,omitempty"` +} + +// AlertRelabelConfigSpec is the desired state of an AlertRelabelConfig resource. +// +// +k8s:openapi-gen=true +type AlertRelabelConfigSpec struct { + // configs is a list of sequentially evaluated alert relabel configs. + // + // +kubebuilder:validation:MinItems:=1 + Configs []RelabelConfig `json:"configs"` +} + +// AlertRelabelConfigStatus is the status of an AlertRelabelConfig resource. +type AlertRelabelConfigStatus struct { + // conditions contains details on the state of the AlertRelabelConfig; it may + // be empty. + // + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +const ( + // AlertRelabelConfigReady is the condition type indicating readiness. + AlertRelabelConfigReady string = "Ready" +) + +// AlertRelabelConfigList is a list of AlertRelabelConfigs.
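+//
+// For illustration only (the label name and value below are hypothetical), a
+// single AlertRelabelConfig in such a list might drop alerts whose severity
+// label is "info":
+//
+//   apiVersion: monitoring.openshift.io/v1alpha1
+//   kind: AlertRelabelConfig
+//   metadata:
+//     name: example
+//     namespace: openshift-monitoring
+//   spec:
+//     configs:
+//     - sourceLabels: [severity]
+//       regex: info
+//       action: Drop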
+// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type AlertRelabelConfigList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty"` + + // items is a list of AlertRelabelConfigs. + Items []*AlertRelabelConfig `json:"items"` +} + +// LabelName is a valid Prometheus label name which may only contain ASCII +// letters, numbers, and underscores. +// +// +kubebuilder:validation:Pattern:="^[a-zA-Z_][a-zA-Z0-9_]*$" +type LabelName string + +// RelabelConfig allows dynamic rewriting of label sets for alerts. +// See Prometheus documentation: +// - https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs +// - https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config +// +// +k8s:openapi-gen=true +type RelabelConfig struct { + // sourceLabels select values from existing labels. Their content is + // concatenated using the configured separator and matched against the + // configured regular expression for the Replace, Keep, and Drop actions. + // + // +optional + SourceLabels []LabelName `json:"sourceLabels,omitempty"` + + // separator placed between concatenated source label values. When omitted, + // Prometheus will use its default value of ';'. + // + // +optional + Separator string `json:"separator,omitempty"` + + // targetLabel to which the resulting value is written in a 'Replace' action. + // It is mandatory for 'Replace' and 'HashMod' actions. Regex capture groups + // are available. + // + // +optional + TargetLabel string `json:"targetLabel,omitempty"` + + // regex against which the extracted value is matched. Default is: '(.*)' + // + // +optional + Regex string `json:"regex,omitempty"` + + // modulus to take of the hash of the source label values. This can be + // combined with the 'HashMod' action to set 'target_label' to the 'modulus' + // of a hash of the concatenated 'source_labels'. + // + // +optional + Modulus uint64 `json:"modulus,omitempty"` + + // replacement value against which a regex replace is performed if the regular + // expression matches. This is required if the action is 'Replace' or + // 'LabelMap'. Regex capture groups are available. Default is: '$1' + // + // +optional + Replacement string `json:"replacement,omitempty"` + + // action to perform based on regex matching. Must be one of: Replace, Keep, + // Drop, HashMod, LabelMap, LabelDrop, or LabelKeep. Default is: 'Replace' + // + // +kubebuilder:validation:Enum=Replace;Keep;Drop;HashMod;LabelMap;LabelDrop;LabelKeep + // +kubebuilder:default=Replace + // +optional + Action string `json:"action,omitempty"` +} diff --git a/vendor/github.com/openshift/api/monitoring/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/monitoring/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..649d823a1 --- /dev/null +++ b/vendor/github.com/openshift/api/monitoring/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,314 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertRelabelConfig) DeepCopyInto(out *AlertRelabelConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertRelabelConfig. +func (in *AlertRelabelConfig) DeepCopy() *AlertRelabelConfig { + if in == nil { + return nil + } + out := new(AlertRelabelConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AlertRelabelConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertRelabelConfigList) DeepCopyInto(out *AlertRelabelConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*AlertRelabelConfig, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(AlertRelabelConfig) + (*in).DeepCopyInto(*out) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertRelabelConfigList. +func (in *AlertRelabelConfigList) DeepCopy() *AlertRelabelConfigList { + if in == nil { + return nil + } + out := new(AlertRelabelConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AlertRelabelConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertRelabelConfigSpec) DeepCopyInto(out *AlertRelabelConfigSpec) { + *out = *in + if in.Configs != nil { + in, out := &in.Configs, &out.Configs + *out = make([]RelabelConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertRelabelConfigSpec. +func (in *AlertRelabelConfigSpec) DeepCopy() *AlertRelabelConfigSpec { + if in == nil { + return nil + } + out := new(AlertRelabelConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertRelabelConfigStatus) DeepCopyInto(out *AlertRelabelConfigStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertRelabelConfigStatus. 
+func (in *AlertRelabelConfigStatus) DeepCopy() *AlertRelabelConfigStatus { + if in == nil { + return nil + } + out := new(AlertRelabelConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertingRule) DeepCopyInto(out *AlertingRule) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertingRule. +func (in *AlertingRule) DeepCopy() *AlertingRule { + if in == nil { + return nil + } + out := new(AlertingRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AlertingRule) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertingRuleList) DeepCopyInto(out *AlertingRuleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AlertingRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertingRuleList. +func (in *AlertingRuleList) DeepCopy() *AlertingRuleList { + if in == nil { + return nil + } + out := new(AlertingRuleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AlertingRuleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertingRuleSpec) DeepCopyInto(out *AlertingRuleSpec) { + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]RuleGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertingRuleSpec. +func (in *AlertingRuleSpec) DeepCopy() *AlertingRuleSpec { + if in == nil { + return nil + } + out := new(AlertingRuleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AlertingRuleStatus) DeepCopyInto(out *AlertingRuleStatus) { + *out = *in + out.PrometheusRule = in.PrometheusRule + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AlertingRuleStatus. +func (in *AlertingRuleStatus) DeepCopy() *AlertingRuleStatus { + if in == nil { + return nil + } + out := new(AlertingRuleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusRuleRef) DeepCopyInto(out *PrometheusRuleRef) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusRuleRef. 
+func (in *PrometheusRuleRef) DeepCopy() *PrometheusRuleRef { + if in == nil { + return nil + } + out := new(PrometheusRuleRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RelabelConfig) DeepCopyInto(out *RelabelConfig) { + *out = *in + if in.SourceLabels != nil { + in, out := &in.SourceLabels, &out.SourceLabels + *out = make([]LabelName, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RelabelConfig. +func (in *RelabelConfig) DeepCopy() *RelabelConfig { + if in == nil { + return nil + } + out := new(RelabelConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Rule) DeepCopyInto(out *Rule) { + *out = *in + out.Expr = in.Expr + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule. +func (in *Rule) DeepCopy() *Rule { + if in == nil { + return nil + } + out := new(Rule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleGroup) DeepCopyInto(out *RuleGroup) { + *out = *in + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]Rule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleGroup. +func (in *RuleGroup) DeepCopy() *RuleGroup { + if in == nil { + return nil + } + out := new(RuleGroup) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/monitoring/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/monitoring/v1alpha1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..759290709 --- /dev/null +++ b/vendor/github.com/openshift/api/monitoring/v1alpha1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,141 @@ +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_AlertRelabelConfig = map[string]string{ + "": "AlertRelabelConfig defines a set of relabel configs for alerts.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec describes the desired state of this AlertRelabelConfig object.", + "status": "status describes the current state of this AlertRelabelConfig object.", +} + +func (AlertRelabelConfig) SwaggerDoc() map[string]string { + return map_AlertRelabelConfig +} + +var map_AlertRelabelConfigList = map[string]string{ + "": "AlertRelabelConfigList is a list of AlertRelabelConfigs.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of AlertRelabelConfigs.", +} + +func (AlertRelabelConfigList) SwaggerDoc() map[string]string { + return map_AlertRelabelConfigList +} + +var map_AlertRelabelConfigSpec = map[string]string{ + "": "AlertRelabelConfigSpec is the desired state of an AlertRelabelConfig resource.", + "configs": "configs is a list of sequentially evaluated alert relabel configs.", +} + +func (AlertRelabelConfigSpec) SwaggerDoc() map[string]string { + return map_AlertRelabelConfigSpec +} + +var map_AlertRelabelConfigStatus = map[string]string{ + "": "AlertRelabelConfigStatus is the status of an AlertRelabelConfig resource.", + "conditions": "conditions contains details on the state of the AlertRelabelConfig; it may be empty.", +} + +func (AlertRelabelConfigStatus) SwaggerDoc() map[string]string { + return map_AlertRelabelConfigStatus +} + +var map_AlertingRule = map[string]string{ + "": "AlertingRule represents a set of user-defined Prometheus rule groups containing alerting rules. This resource is the supported method for cluster admins to create alerts based on metrics recorded by the platform monitoring stack in OpenShift, i.e. the Prometheus instance deployed to the openshift-monitoring namespace. You might use this to create custom alerting rules not shipped with OpenShift based on metrics from components such as the node_exporter, which provides machine-level metrics such as CPU usage, or kube-state-metrics, which provides metrics on Kubernetes usage.\n\nThe API is mostly compatible with the upstream PrometheusRule type from the prometheus-operator. The primary difference is that recording rules are not allowed here -- only alerting rules. For each AlertingRule resource created, a corresponding PrometheusRule will be created in the openshift-monitoring namespace. OpenShift requires admins to use the AlertingRule resource rather than the upstream type in order to allow better OpenShift specific defaulting and validation, while not modifying the upstream APIs directly.\n\nYou can find upstream API documentation for PrometheusRule resources here:\n\nhttps://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec describes the desired state of this AlertingRule object.", + "status": "status describes the current state of this AlertingRule object.", +} + +func (AlertingRule) SwaggerDoc() map[string]string { + return map_AlertingRule +} + +var map_AlertingRuleList = map[string]string{ + "": "AlertingRuleList is a list of AlertingRule objects.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of AlertingRule objects.", +} + +func (AlertingRuleList) SwaggerDoc() map[string]string { + return map_AlertingRuleList +} + +var map_AlertingRuleSpec = map[string]string{ + "": "AlertingRuleSpec is the desired state of an AlertingRule resource.", + "groups": "groups is a list of grouped alerting rules. Rule groups are the unit at which Prometheus parallelizes rule processing. All rules in a single group share a configured evaluation interval. All rules in the group will be processed together on this interval, sequentially, and all rules will be processed.\n\nIt's common to group related alerting rules into a single AlertingRule resource, and within that resource, closely related alerts, or simply alerts with the same interval, into individual groups. You are also free to create AlertingRule resources with only a single rule group, but be aware that this can have a performance impact on Prometheus if the group is extremely large or has very complex query expressions to evaluate. Spreading very complex rules across multiple groups to allow them to be processed in parallel is also a common use-case.", +} + +func (AlertingRuleSpec) SwaggerDoc() map[string]string { + return map_AlertingRuleSpec +} + +var map_AlertingRuleStatus = map[string]string{ + "": "AlertingRuleStatus is the status of an AlertingRule resource.", + "observedGeneration": "observedGeneration is the last metadata.generation of this resource that has been observed and acted upon.", + "prometheusRule": "prometheusRule is the generated PrometheusRule for this AlertingRule. Each AlertingRule instance results in a generated PrometheusRule object in the same namespace, which is always the openshift-monitoring namespace.", +} + +func (AlertingRuleStatus) SwaggerDoc() map[string]string { + return map_AlertingRuleStatus +} + +var map_PrometheusRuleRef = map[string]string{ + "": "PrometheusRuleRef is a reference to an existing PrometheusRule object. Each AlertingRule instance results in a generated PrometheusRule object in the same namespace, which is always the openshift-monitoring namespace. This is used to point to the generated PrometheusRule object in the AlertingRule status.", + "name": "name of the referenced PrometheusRule.", +} + +func (PrometheusRuleRef) SwaggerDoc() map[string]string { + return map_PrometheusRuleRef +} + +var map_RelabelConfig = map[string]string{ + "": "RelabelConfig allows dynamic rewriting of label sets for alerts. See Prometheus documentation: - https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs - https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config", + "sourceLabels": "sourceLabels select values from existing labels.
Their content is concatenated using the configured separator and matched against the configured regular expression for the Replace, Keep, and Drop actions.", + "separator": "separator placed between concatenated source label values. When omitted, Prometheus will use its default value of ';'.", + "targetLabel": "targetLabel to which the resulting value is written in a 'Replace' action. It is mandatory for 'Replace' and 'HashMod' actions. Regex capture groups are available.", + "regex": "regex against which the extracted value is matched. Default is: '(.*)'", + "modulus": "modulus to take of the hash of the source label values. This can be combined with the 'HashMod' action to set 'target_label' to the 'modulus' of a hash of the concatenated 'source_labels'.", + "replacement": "replacement value against which a regex replace is performed if the regular expression matches. This is required if the action is 'Replace' or 'LabelMap'. Regex capture groups are available. Default is: '$1'", + "action": "action to perform based on regex matching. Must be one of: Replace, Keep, Drop, HashMod, LabelMap, LabelDrop, or LabelKeep. Default is: 'Replace'", +} + +func (RelabelConfig) SwaggerDoc() map[string]string { + return map_RelabelConfig +} + +var map_Rule = map[string]string{ + "": "Rule describes an alerting rule. See Prometheus documentation: - https://www.prometheus.io/docs/prometheus/latest/configuration/alerting_rules", + "alert": "alert is the name of the alert. Must be a valid label value, i.e. only contain ASCII letters, numbers, and underscores.", + "expr": "expr is the PromQL expression to evaluate. Every evaluation cycle this is evaluated at the current time, and all resultant time series become pending or firing alerts. This is most often a string representing a PromQL expression, e.g.:\n\n mapi_current_pending_csr > mapi_max_pending_csr\n\nIn rare cases this could be a simple integer, e.g. a simple \"1\" if the intent is to create an alert that is always firing. This is sometimes used to create an always-firing \"Watchdog\" alert in order to ensure the alerting pipeline is functional.", + "for": "for is the time period after which alerts are considered firing after first returning results. Alerts which have not yet fired for long enough are considered pending. This is represented as a Prometheus duration, for details on the format see: - https://prometheus.io/docs/prometheus/latest/configuration/configuration/#duration", + "labels": "labels to add or overwrite for each alert. Evaluating the PromQL expression for the alert produces a set of labels for the alert; for any label specified here with the same name as a label in that set, the label here wins and overwrites the previous value. These should typically be short identifying values that may be useful to query against. A common example is the alert severity:\n\n labels:\n severity: warning", + "annotations": "annotations to add to each alert.
These are values that can be used to store longer additional information that you won't query on, such as alert descriptions or runbook links, e.g.:\n\n annotations:\n summary: HAProxy reload failure\n description: |\n This alert fires when HAProxy fails to reload its\n configuration, which will result in the router not picking up\n recently created or modified routes.", +} + +func (Rule) SwaggerDoc() map[string]string { + return map_Rule +} + +var map_RuleGroup = map[string]string{ + "": "RuleGroup is a list of sequentially evaluated alerting rules.", + "name": "name is the name of the group.", + "interval": "interval is how often rules in the group are evaluated. If not specified, it defaults to the global.evaluation_interval configured in Prometheus, which itself defaults to 30 seconds. You can check if this value has been modified from the default on your cluster by inspecting the platform Prometheus configuration:\n\n$ oc -n openshift-monitoring describe prometheus k8s\n\nThe relevant field in that resource is: spec.evaluationInterval\n\nThis is represented as a Prometheus duration, e.g. 1d, 1h30m, 5m, 10s. You can find the upstream documentation here:\n\nhttps://prometheus.io/docs/prometheus/latest/configuration/configuration/#duration", + "rules": "rules is a list of sequentially evaluated alerting rules. Prometheus may process rule groups in parallel, but rules within a single group are always processed sequentially, and all rules are processed.", +} + +func (RuleGroup) SwaggerDoc() map[string]string { + return map_RuleGroup +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/network/OWNERS b/vendor/github.com/openshift/api/network/OWNERS new file mode 100644 index 000000000..279009f7a --- /dev/null +++ b/vendor/github.com/openshift/api/network/OWNERS @@ -0,0 +1,4 @@ +reviewers: + - danwinship + - dcbw + - knobunc diff --git a/vendor/github.com/openshift/api/network/install.go b/vendor/github.com/openshift/api/network/install.go new file mode 100644 index 000000000..85bc70623 --- /dev/null +++ b/vendor/github.com/openshift/api/network/install.go @@ -0,0 +1,26 @@ +package network + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + networkv1 "github.com/openshift/api/network/v1" +) + +const ( + GroupName = "network.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(networkv1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/network/v1/001-clusternetwork-crd.yaml b/vendor/github.com/openshift/api/network/v1/001-clusternetwork-crd.yaml new file mode 100644 index 000000000..7609e4d1f --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/001-clusternetwork-crd.yaml @@ -0,0 +1,102 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/527 + name: clusternetworks.network.openshift.io +spec: + group: network.openshift.io + names: + kind: ClusterNetwork + listKind: ClusterNetworkList + plural: clusternetworks + singular: clusternetwork + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The primary cluster 
network CIDR + jsonPath: .network + name: Cluster Network + type: string + - description: The service network CIDR + jsonPath: .serviceNetwork + name: Service Network + type: string + - description: The OpenShift SDN network plug-in in use + jsonPath: .pluginName + name: Plugin Name + type: string + name: v1 + schema: + openAPIV3Schema: + description: "ClusterNetwork describes the cluster network. There is normally only one object of this type, named \"default\", which is created by the SDN network plugin based on the master configuration when the cluster is brought up for the first time. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - clusterNetworks + - serviceNetwork + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + clusterNetworks: + description: ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from. + type: array + items: + description: ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips. + type: object + required: + - CIDR + - hostSubnetLength + properties: + CIDR: + description: CIDR defines the total range of a cluster networks address space. + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$ + hostSubnetLength: + description: HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods. + type: integer + format: int32 + maximum: 30 + minimum: 2 + hostsubnetlength: + description: HostSubnetLength is the number of bits of network to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods + type: integer + format: int32 + maximum: 30 + minimum: 2 + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + mtu: + description: MTU is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator. 
+ type: integer + format: int32 + maximum: 65536 + minimum: 576 + network: + description: Network is a CIDR string specifying the global overlay network's L3 space + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$ + pluginName: + description: PluginName is the name of the network plugin being used + type: string + serviceNetwork: + description: ServiceNetwork is the CIDR range that Service IP addresses are allocated from + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$ + vxlanPort: + description: VXLANPort sets the VXLAN destination port used by the cluster. It is set by the master configuration file on startup and cannot be edited manually. Valid values for VXLANPort are integers 1-65535 inclusive, and if unset it defaults to 4789. Changing VXLANPort allows users to resolve issues between openshift SDN and other software trying to use the same VXLAN destination port. + type: integer + format: int32 + maximum: 65535 + minimum: 1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/api/network/v1/002-hostsubnet-crd.yaml b/vendor/github.com/openshift/api/network/v1/002-hostsubnet-crd.yaml new file mode 100644 index 000000000..d8a1f665e --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/002-hostsubnet-crd.yaml @@ -0,0 +1,88 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/527 + name: hostsubnets.network.openshift.io +spec: + group: network.openshift.io + names: + kind: HostSubnet + listKind: HostSubnetList + plural: hostsubnets + singular: hostsubnet + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The name of the node + jsonPath: .host + name: Host + type: string + - description: The IP address to be used as a VTEP by other nodes in the overlay network + jsonPath: .hostIP + name: Host IP + type: string + - description: The CIDR range of the overlay network assigned to the node for its pods + jsonPath: .subnet + name: Subnet + type: string + - description: The network egress CIDRs + jsonPath: .egressCIDRs + name: Egress CIDRs + type: string + - description: The network egress IP addresses + jsonPath: .egressIPs + name: Egress IPs + type: string + name: v1 + schema: + openAPIV3Schema: + description: "HostSubnet describes the container subnet network on a node. The HostSubnet object must have the same name as the Node object it corresponds to. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - host + - hostIP + - subnet + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + egressCIDRs: + description: EgressCIDRs is the list of CIDR ranges from which egress IPs can be automatically assigned to this node. If this field is set then EgressIPs should be treated as read-only.
+ type: array + items: + description: HostSubnetEgressCIDR represents one egress CIDR from which to assign IP addresses for this node represented by the HostSubnet + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$ + egressIPs: + description: EgressIPs is the list of automatic egress IP addresses currently hosted by this node. If EgressCIDRs is empty, this can be set by hand; if EgressCIDRs is set then the master will overwrite the value here with its own allocation of egress IPs. + type: array + items: + description: HostSubnetEgressIP represents one egress IP address currently hosted on the node represented by HostSubnet + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$ + host: + description: Host is the name of the node. (This is the same as the object's name, but both fields must be set.) + type: string + pattern: ^[a-z0-9.-]+$ + hostIP: + description: HostIP is the IP address to be used as a VTEP by other nodes in the overlay network + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$ + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + subnet: + description: Subnet is the CIDR range of the overlay network assigned to the node for its pods + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$ + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/api/network/v1/003-netnamespace-crd.yaml b/vendor/github.com/openshift/api/network/v1/003-netnamespace-crd.yaml new file mode 100644 index 000000000..7525e8810 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/003-netnamespace-crd.yaml @@ -0,0 +1,66 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/527 + name: netnamespaces.network.openshift.io +spec: + group: network.openshift.io + names: + kind: NetNamespace + listKind: NetNamespaceList + plural: netnamespaces + singular: netnamespace + scope: Cluster + versions: + - additionalPrinterColumns: + - description: The network identifier of the network namespace + jsonPath: .netid + name: NetID + type: integer + - description: The network egress IP addresses + jsonPath: .egressIPs + name: Egress IPs + type: string + name: v1 + schema: + openAPIV3Schema: + description: "NetNamespace describes a single isolated network. When using the redhat/openshift-ovs-multitenant plugin, every Namespace will have a corresponding NetNamespace object with the same name. (When using redhat/openshift-ovs-subnet, NetNamespaces are not used.) \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." 
+ type: object + required: + - netid + - netname + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + egressIPs: + description: EgressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace. (If empty, external traffic will be masqueraded to Node IPs.) + type: array + items: + description: NetNamespaceEgressIP is a single egress IP out of a list of reserved IPs used as source of external traffic coming from pods in this namespace + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$ + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + netid: + description: NetID is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the "oc adm pod-network" commands. + type: integer + format: int32 + maximum: 16777215 + minimum: 0 + netname: + description: NetName is the name of the network namespace. (This is the same as the object's name, but both fields must be set.) + type: string + pattern: ^[a-z0-9.-]+$ + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/api/network/v1/004-egressnetworkpolicy-crd.yaml b/vendor/github.com/openshift/api/network/v1/004-egressnetworkpolicy-crd.yaml new file mode 100644 index 000000000..d1b606306 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/004-egressnetworkpolicy-crd.yaml @@ -0,0 +1,71 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/527 + name: egressnetworkpolicies.network.openshift.io +spec: + group: network.openshift.io + names: + kind: EgressNetworkPolicy + listKind: EgressNetworkPolicyList + plural: egressnetworkpolicies + singular: egressnetworkpolicy + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "EgressNetworkPolicy describes the current egress network policy for a Namespace. When using the 'redhat/openshift-ovs-multitenant' network plugin, traffic from a pod to an IP address outside the cluster will be checked against each EgressNetworkPolicyRule in the pod's namespace's EgressNetworkPolicy, in order. If no rule matches (or no EgressNetworkPolicy is present) then the traffic will be allowed by default. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the current egress network policy + type: object + required: + - egress + properties: + egress: + description: egress contains the list of egress policy rules + type: array + items: + description: EgressNetworkPolicyRule contains a single egress network policy rule + type: object + required: + - to + - type + properties: + to: + description: to is the target that traffic is allowed/denied to + type: object + properties: + cidrSelector: + description: CIDRSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset. Ideally we would have liked to use the cidr openapi format for this property. But openshift-sdn only supports v4 while specifying the cidr format allows both v4 and v6 cidrs. We are therefore using a regex pattern to validate instead. + type: string + pattern: ^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$ + dnsName: + description: DNSName is the domain name to allow/deny traffic to. If this is set, cidrSelector must be unset + type: string + pattern: ^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$ + type: + description: type marks this as an "Allow" or "Deny" rule + type: string + pattern: ^(Allow|Deny)$ + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/api/network/v1/Makefile b/vendor/github.com/openshift/api/network/v1/Makefile new file mode 100644 index 000000000..027afff7c --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="network.openshift.io/v1" diff --git a/vendor/github.com/openshift/api/network/v1/constants.go b/vendor/github.com/openshift/api/network/v1/constants.go new file mode 100644 index 000000000..54c06f331 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/constants.go @@ -0,0 +1,17 @@ +package v1 + +const ( + // Pod annotations + AssignMacvlanAnnotation = "pod.network.openshift.io/assign-macvlan" + + // HostSubnet annotations. (Note: should be "hostsubnet.network.openshift.io/", but the incorrect name is now part of the API.)
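+	// For illustration only: these constants are plain annotation keys set on
+	// the corresponding objects. For example, the Pod annotation above is
+	// commonly documented with the string value "true" (hypothetical snippet,
+	// not part of this package):
+	//
+	//   metadata:
+	//     annotations:
+	//       pod.network.openshift.io/assign-macvlan: "true"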
+ AssignHostSubnetAnnotation = "pod.network.openshift.io/assign-subnet" + FixedVNIDHostAnnotation = "pod.network.openshift.io/fixed-vnid-host" + NodeUIDAnnotation = "pod.network.openshift.io/node-uid" + + // NetNamespace annotations + MulticastEnabledAnnotation = "netnamespace.network.openshift.io/multicast-enabled" + + // ChangePodNetworkAnnotation is an annotation on NetNamespace to request change of pod network + ChangePodNetworkAnnotation string = "pod.network.openshift.io/multitenant.change-network" +) diff --git a/vendor/github.com/openshift/api/network/v1/doc.go b/vendor/github.com/openshift/api/network/v1/doc.go new file mode 100644 index 000000000..2816420d9 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/network/apis/network +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=network.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/network/v1/generated.pb.go b/vendor/github.com/openshift/api/network/v1/generated.pb.go new file mode 100644 index 000000000..9534e3715 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/generated.pb.go @@ -0,0 +1,3186 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/network/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ClusterNetwork) Reset() { *m = ClusterNetwork{} } +func (*ClusterNetwork) ProtoMessage() {} +func (*ClusterNetwork) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{0} +} +func (m *ClusterNetwork) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterNetwork) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterNetwork) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterNetwork.Merge(m, src) +} +func (m *ClusterNetwork) XXX_Size() int { + return m.Size() +} +func (m *ClusterNetwork) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterNetwork.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterNetwork proto.InternalMessageInfo + +func (m *ClusterNetworkEntry) Reset() { *m = ClusterNetworkEntry{} } +func (*ClusterNetworkEntry) ProtoMessage() {} +func (*ClusterNetworkEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{1} +} +func (m *ClusterNetworkEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterNetworkEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterNetworkEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterNetworkEntry.Merge(m, src) +} +func (m *ClusterNetworkEntry) XXX_Size() int { + return m.Size() +} +func (m *ClusterNetworkEntry) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterNetworkEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterNetworkEntry proto.InternalMessageInfo + +func (m *ClusterNetworkList) Reset() { *m = ClusterNetworkList{} } +func (*ClusterNetworkList) ProtoMessage() {} +func (*ClusterNetworkList) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{2} +} +func (m *ClusterNetworkList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterNetworkList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterNetworkList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterNetworkList.Merge(m, src) +} +func (m *ClusterNetworkList) XXX_Size() int { + return m.Size() +} +func (m *ClusterNetworkList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterNetworkList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterNetworkList proto.InternalMessageInfo + +func (m *EgressNetworkPolicy) Reset() { *m = EgressNetworkPolicy{} } +func (*EgressNetworkPolicy) ProtoMessage() {} +func (*EgressNetworkPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{3} +} +func (m *EgressNetworkPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EgressNetworkPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EgressNetworkPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_EgressNetworkPolicy.Merge(m, src) +} +func (m *EgressNetworkPolicy) XXX_Size() int { + return m.Size() +} +func (m *EgressNetworkPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_EgressNetworkPolicy.DiscardUnknown(m) +} + +var 
xxx_messageInfo_EgressNetworkPolicy proto.InternalMessageInfo + +func (m *EgressNetworkPolicyList) Reset() { *m = EgressNetworkPolicyList{} } +func (*EgressNetworkPolicyList) ProtoMessage() {} +func (*EgressNetworkPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{4} +} +func (m *EgressNetworkPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EgressNetworkPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EgressNetworkPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EgressNetworkPolicyList.Merge(m, src) +} +func (m *EgressNetworkPolicyList) XXX_Size() int { + return m.Size() +} +func (m *EgressNetworkPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_EgressNetworkPolicyList.DiscardUnknown(m) +} + +var xxx_messageInfo_EgressNetworkPolicyList proto.InternalMessageInfo + +func (m *EgressNetworkPolicyPeer) Reset() { *m = EgressNetworkPolicyPeer{} } +func (*EgressNetworkPolicyPeer) ProtoMessage() {} +func (*EgressNetworkPolicyPeer) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{5} +} +func (m *EgressNetworkPolicyPeer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EgressNetworkPolicyPeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EgressNetworkPolicyPeer) XXX_Merge(src proto.Message) { + xxx_messageInfo_EgressNetworkPolicyPeer.Merge(m, src) +} +func (m *EgressNetworkPolicyPeer) XXX_Size() int { + return m.Size() +} +func (m *EgressNetworkPolicyPeer) XXX_DiscardUnknown() { + xxx_messageInfo_EgressNetworkPolicyPeer.DiscardUnknown(m) +} + +var xxx_messageInfo_EgressNetworkPolicyPeer proto.InternalMessageInfo + +func (m *EgressNetworkPolicyRule) Reset() { *m = EgressNetworkPolicyRule{} } +func (*EgressNetworkPolicyRule) ProtoMessage() {} +func (*EgressNetworkPolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{6} +} +func (m *EgressNetworkPolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EgressNetworkPolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EgressNetworkPolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_EgressNetworkPolicyRule.Merge(m, src) +} +func (m *EgressNetworkPolicyRule) XXX_Size() int { + return m.Size() +} +func (m *EgressNetworkPolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_EgressNetworkPolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_EgressNetworkPolicyRule proto.InternalMessageInfo + +func (m *EgressNetworkPolicySpec) Reset() { *m = EgressNetworkPolicySpec{} } +func (*EgressNetworkPolicySpec) ProtoMessage() {} +func (*EgressNetworkPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{7} +} +func (m *EgressNetworkPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EgressNetworkPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EgressNetworkPolicySpec) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_EgressNetworkPolicySpec.Merge(m, src) +} +func (m *EgressNetworkPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *EgressNetworkPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_EgressNetworkPolicySpec.DiscardUnknown(m) +} + +var xxx_messageInfo_EgressNetworkPolicySpec proto.InternalMessageInfo + +func (m *HostSubnet) Reset() { *m = HostSubnet{} } +func (*HostSubnet) ProtoMessage() {} +func (*HostSubnet) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{8} +} +func (m *HostSubnet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostSubnet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostSubnet) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostSubnet.Merge(m, src) +} +func (m *HostSubnet) XXX_Size() int { + return m.Size() +} +func (m *HostSubnet) XXX_DiscardUnknown() { + xxx_messageInfo_HostSubnet.DiscardUnknown(m) +} + +var xxx_messageInfo_HostSubnet proto.InternalMessageInfo + +func (m *HostSubnetList) Reset() { *m = HostSubnetList{} } +func (*HostSubnetList) ProtoMessage() {} +func (*HostSubnetList) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{9} +} +func (m *HostSubnetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostSubnetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostSubnetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostSubnetList.Merge(m, src) +} +func (m *HostSubnetList) XXX_Size() int { + return m.Size() +} +func (m *HostSubnetList) XXX_DiscardUnknown() { + xxx_messageInfo_HostSubnetList.DiscardUnknown(m) +} + +var xxx_messageInfo_HostSubnetList proto.InternalMessageInfo + +func (m *NetNamespace) Reset() { *m = NetNamespace{} } +func (*NetNamespace) ProtoMessage() {} +func (*NetNamespace) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{10} +} +func (m *NetNamespace) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetNamespace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetNamespace) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetNamespace.Merge(m, src) +} +func (m *NetNamespace) XXX_Size() int { + return m.Size() +} +func (m *NetNamespace) XXX_DiscardUnknown() { + xxx_messageInfo_NetNamespace.DiscardUnknown(m) +} + +var xxx_messageInfo_NetNamespace proto.InternalMessageInfo + +func (m *NetNamespaceList) Reset() { *m = NetNamespaceList{} } +func (*NetNamespaceList) ProtoMessage() {} +func (*NetNamespaceList) Descriptor() ([]byte, []int) { + return fileDescriptor_38d1cb27735fa5d9, []int{11} +} +func (m *NetNamespaceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetNamespaceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetNamespaceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetNamespaceList.Merge(m, src) +} +func (m *NetNamespaceList) XXX_Size() int { + return m.Size() +} +func (m *NetNamespaceList) XXX_DiscardUnknown() { + 
xxx_messageInfo_NetNamespaceList.DiscardUnknown(m) +} + +var xxx_messageInfo_NetNamespaceList proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ClusterNetwork)(nil), "github.com.openshift.api.network.v1.ClusterNetwork") + proto.RegisterType((*ClusterNetworkEntry)(nil), "github.com.openshift.api.network.v1.ClusterNetworkEntry") + proto.RegisterType((*ClusterNetworkList)(nil), "github.com.openshift.api.network.v1.ClusterNetworkList") + proto.RegisterType((*EgressNetworkPolicy)(nil), "github.com.openshift.api.network.v1.EgressNetworkPolicy") + proto.RegisterType((*EgressNetworkPolicyList)(nil), "github.com.openshift.api.network.v1.EgressNetworkPolicyList") + proto.RegisterType((*EgressNetworkPolicyPeer)(nil), "github.com.openshift.api.network.v1.EgressNetworkPolicyPeer") + proto.RegisterType((*EgressNetworkPolicyRule)(nil), "github.com.openshift.api.network.v1.EgressNetworkPolicyRule") + proto.RegisterType((*EgressNetworkPolicySpec)(nil), "github.com.openshift.api.network.v1.EgressNetworkPolicySpec") + proto.RegisterType((*HostSubnet)(nil), "github.com.openshift.api.network.v1.HostSubnet") + proto.RegisterType((*HostSubnetList)(nil), "github.com.openshift.api.network.v1.HostSubnetList") + proto.RegisterType((*NetNamespace)(nil), "github.com.openshift.api.network.v1.NetNamespace") + proto.RegisterType((*NetNamespaceList)(nil), "github.com.openshift.api.network.v1.NetNamespaceList") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/network/v1/generated.proto", fileDescriptor_38d1cb27735fa5d9) +} + +var fileDescriptor_38d1cb27735fa5d9 = []byte{ + // 996 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x6f, 0xe3, 0x44, + 0x14, 0xaf, 0xf3, 0xa7, 0x6d, 0x26, 0x6d, 0x5a, 0xcd, 0x56, 0xac, 0x29, 0x92, 0x13, 0xb9, 0x02, + 0x82, 0x56, 0xd8, 0xb4, 0x8b, 0x50, 0x0f, 0x08, 0xb4, 0x6e, 0x2b, 0x6d, 0xa4, 0x6e, 0x88, 0x26, + 0x65, 0x55, 0x21, 0x40, 0xb8, 0xce, 0xac, 0x63, 0x9a, 0xd8, 0x96, 0x67, 0x12, 0x88, 0x10, 0x7f, + 0x2e, 0xdc, 0xf9, 0x00, 0x7c, 0x0c, 0x3e, 0x02, 0x87, 0x1e, 0x38, 0xec, 0x09, 0xf6, 0x14, 0x51, + 0x73, 0xe7, 0x03, 0xf4, 0x84, 0x66, 0x3c, 0x8e, 0xed, 0xac, 0x2b, 0xa2, 0x22, 0x72, 0x4a, 0xe6, + 0xfd, 0xde, 0xdf, 0xf9, 0xbd, 0xf7, 0xc6, 0xe0, 0xa1, 0xed, 0xd0, 0xfe, 0xe8, 0x42, 0xb3, 0xbc, + 0xa1, 0xee, 0xf9, 0xd8, 0x25, 0x7d, 0xe7, 0x19, 0xd5, 0x4d, 0xdf, 0xd1, 0x5d, 0x4c, 0xbf, 0xf2, + 0x82, 0x4b, 0x7d, 0xbc, 0xaf, 0xdb, 0xd8, 0xc5, 0x81, 0x49, 0x71, 0x4f, 0xf3, 0x03, 0x8f, 0x7a, + 0x70, 0x2f, 0x31, 0xd2, 0x66, 0x46, 0x9a, 0xe9, 0x3b, 0x9a, 0x30, 0xd2, 0xc6, 0xfb, 0xbb, 0x6f, + 0xa7, 0x3c, 0xdb, 0x9e, 0xed, 0xe9, 0xdc, 0xf6, 0x62, 0xf4, 0x8c, 0x9f, 0xf8, 0x81, 0xff, 0x8b, + 0x7c, 0xee, 0xbe, 0x7b, 0x79, 0x48, 0x34, 0xc7, 0x63, 0xa1, 0x87, 0xa6, 0xd5, 0x77, 0x5c, 0x1c, + 0x4c, 0x74, 0xff, 0xd2, 0x66, 0x02, 0xa2, 0x0f, 0x31, 0x35, 0x73, 0x32, 0xd9, 0x7d, 0xef, 0x36, + 0xab, 0x60, 0xe4, 0x52, 0x67, 0x88, 0x75, 0x62, 0xf5, 0xf1, 0xd0, 0x9c, 0xb7, 0x53, 0x7f, 0x2e, + 0x81, 0xda, 0xd1, 0x60, 0x44, 0x28, 0x0e, 0xda, 0x51, 0xca, 0xf0, 0x0b, 0xb0, 0xce, 0xa2, 0xf4, + 0x4c, 0x6a, 0xca, 0x52, 0x43, 0x6a, 0x56, 0x0f, 0xde, 0xd1, 0x22, 0xef, 0x5a, 0xda, 0xbb, 0xe6, + 0x5f, 0xda, 0x4c, 0x40, 0x34, 0xa6, 0xad, 0x8d, 0xf7, 0xb5, 0x8f, 0x2e, 0xbe, 0xc4, 0x16, 0x7d, + 0x82, 0xa9, 0x69, 0xc0, 0xab, 0x69, 0x7d, 0x25, 0x9c, 0xd6, 0x41, 0x22, 0x43, 0x33, 0xaf, 0xf0, + 0x2d, 0xb0, 0x26, 0xee, 0x47, 0x2e, 0x34, 0xa4, 0x66, 0xc5, 0xd8, 0x12, 0xea, 0x6b, 0x22, 0x07, + 0x14, 0xe3, 0xf0, 0x18, 0x6c, 0xf7, 0x3d, 
0x42, 0xc9, 0xe8, 0xc2, 0xc5, 0x74, 0x80, 0x5d, 0x9b, + 0xf6, 0xe5, 0x62, 0x43, 0x6a, 0x6e, 0x1a, 0xb2, 0xb0, 0xd9, 0x7e, 0xec, 0x11, 0xda, 0xe5, 0xf8, + 0x29, 0xc7, 0xd1, 0x4b, 0x16, 0xf0, 0x03, 0x50, 0x23, 0x38, 0x18, 0x3b, 0x16, 0x16, 0x01, 0xe4, + 0x12, 0x8f, 0xfb, 0x8a, 0xf0, 0x51, 0xeb, 0x66, 0x50, 0x34, 0xa7, 0x0d, 0x0f, 0x00, 0xf0, 0x07, + 0x23, 0xdb, 0x71, 0xdb, 0xe6, 0x10, 0xcb, 0x65, 0x6e, 0x3b, 0x2b, 0xb1, 0x33, 0x43, 0x50, 0x4a, + 0x0b, 0x7e, 0x03, 0xb6, 0xac, 0xcc, 0xc5, 0x12, 0x79, 0xb5, 0x51, 0x6c, 0x56, 0x0f, 0x0e, 0xb5, + 0x05, 0xba, 0x46, 0xcb, 0x92, 0x72, 0xe2, 0xd2, 0x60, 0x62, 0xdc, 0x17, 0x21, 0xb7, 0xb2, 0x20, + 0x41, 0xf3, 0x91, 0xe0, 0x03, 0x50, 0x19, 0x7f, 0x3d, 0x30, 0xdd, 0x8e, 0x17, 0x50, 0x79, 0x8d, + 0xdf, 0xd7, 0x66, 0x38, 0xad, 0x57, 0x9e, 0x9e, 0x9f, 0x3e, 0x6a, 0x33, 0x21, 0x4a, 0x70, 0xf8, + 0x2a, 0x28, 0x0e, 0xe9, 0x48, 0x5e, 0xe7, 0x6a, 0x6b, 0xe1, 0xb4, 0x5e, 0x7c, 0x72, 0xf6, 0x31, + 0x62, 0x32, 0xf5, 0x5b, 0x70, 0x2f, 0x27, 0x11, 0xd8, 0x00, 0x25, 0xcb, 0xe9, 0x05, 0xbc, 0x3d, + 0x2a, 0xc6, 0x86, 0x48, 0xab, 0x74, 0xd4, 0x3a, 0x46, 0x88, 0x23, 0x31, 0x6f, 0x69, 0x5e, 0x38, + 0xd7, 0xff, 0xca, 0x5b, 0x5a, 0xa2, 0xfe, 0x26, 0x01, 0x98, 0x8d, 0x7f, 0xea, 0x10, 0x0a, 0x3f, + 0x7d, 0xa9, 0x43, 0xb5, 0xc5, 0x3a, 0x94, 0x59, 0xf3, 0xfe, 0xdc, 0x16, 0x49, 0xac, 0xc7, 0x92, + 0x54, 0x77, 0x9e, 0x83, 0xb2, 0x43, 0xf1, 0x90, 0xc8, 0x05, 0x4e, 0xd7, 0xc3, 0x3b, 0xd0, 0x65, + 0x6c, 0x0a, 0xff, 0xe5, 0x16, 0xf3, 0x84, 0x22, 0x87, 0xea, 0x1f, 0x12, 0xb8, 0x77, 0x62, 0x07, + 0x98, 0x10, 0xa1, 0xd7, 0xf1, 0x06, 0x8e, 0x35, 0x59, 0xc2, 0xc4, 0x7d, 0x0e, 0x4a, 0xc4, 0xc7, + 0x16, 0xa7, 0xa0, 0x7a, 0xf0, 0xfe, 0x42, 0x25, 0xe5, 0x64, 0xda, 0xf5, 0xb1, 0x95, 0xd0, 0xcd, + 0x4e, 0x88, 0xfb, 0x55, 0x7f, 0x97, 0xc0, 0xfd, 0x1c, 0xfd, 0x25, 0xb0, 0xf5, 0x59, 0x96, 0xad, + 0xc3, 0xbb, 0x96, 0x76, 0x0b, 0x65, 0xdf, 0xe5, 0xd6, 0xd5, 0xc1, 0x38, 0x80, 0x87, 0x60, 0x83, + 0xb5, 0x7a, 0x17, 0x0f, 0xb0, 0x45, 0xbd, 0x78, 0x18, 0x76, 0x84, 0x9b, 0x0d, 0x36, 0x0c, 0x31, + 0x86, 0x32, 0x9a, 0x6c, 0xff, 0xf5, 0x5c, 0xc2, 0x77, 0xc9, 0xdc, 0xfe, 0x3b, 0x6e, 0x77, 0xf9, + 0x22, 0x89, 0x71, 0xf5, 0x97, 0xfc, 0x8b, 0x45, 0xa3, 0x01, 0x86, 0x1f, 0x82, 0x12, 0x9d, 0xf8, + 0x58, 0x04, 0x7e, 0x10, 0xd3, 0x72, 0x36, 0xf1, 0xf1, 0xcd, 0xb4, 0xfe, 0xda, 0x2d, 0x66, 0x0c, + 0x46, 0xdc, 0x10, 0x9e, 0x83, 0x02, 0xf5, 0xfe, 0x6b, 0x4f, 0xb0, 0xbb, 0x30, 0x80, 0x08, 0x5e, + 0x38, 0xf3, 0x50, 0x81, 0x7a, 0xea, 0xf7, 0xb9, 0x59, 0xb3, 0x86, 0x81, 0x3d, 0xb0, 0x8a, 0x39, + 0x24, 0x4b, 0x9c, 0xb1, 0x3b, 0x07, 0x66, 0xc5, 0x18, 0x35, 0x11, 0x78, 0x35, 0x52, 0x40, 0xc2, + 0xb7, 0xfa, 0x77, 0x01, 0x80, 0x64, 0xc1, 0x2c, 0x61, 0xc2, 0x1a, 0xa0, 0xc4, 0xd6, 0x97, 0x20, + 0x74, 0x36, 0x23, 0x2c, 0x07, 0xc4, 0x11, 0xf8, 0x06, 0x58, 0x65, 0xbf, 0xad, 0x0e, 0x7f, 0xc0, + 0x2a, 0x49, 0xea, 0x8f, 0xb9, 0x14, 0x09, 0x94, 0xe9, 0x45, 0x8f, 0x97, 0x78, 0xa4, 0x66, 0x7a, + 0x51, 0x2d, 0x48, 0xa0, 0xf0, 0x11, 0xa8, 0x44, 0xc5, 0xb6, 0x3a, 0x44, 0x2e, 0x37, 0x8a, 0xcd, + 0x8a, 0xb1, 0xc7, 0x76, 0xfc, 0x49, 0x2c, 0xbc, 0x99, 0xd6, 0x61, 0x72, 0x07, 0xb1, 0x18, 0x25, + 0x56, 0xb0, 0x05, 0xaa, 0xd1, 0x81, 0x35, 0x6b, 0xf4, 0x3e, 0x55, 0x8c, 0x37, 0xc3, 0x69, 0xbd, + 0x7a, 0x92, 0x88, 0x6f, 0xa6, 0xf5, 0x9d, 0x79, 0x37, 0x7c, 0xd3, 0xa7, 0x6d, 0xd5, 0x5f, 0x25, + 0x50, 0x4b, 0x6d, 0xf4, 0xff, 0x7f, 0xf0, 0xcf, 0xb2, 0x83, 0xaf, 0x2f, 0xd4, 0x46, 0x49, 0x86, + 0xb7, 0xcc, 0xfb, 0x8f, 0x05, 0xb0, 0xd1, 0xc6, 0x94, 0xcd, 0x1e, 0xf1, 0x4d, 0x0b, 0x2f, 0xed, + 0x6b, 0xc8, 0xcd, 0xd9, 0x06, 0x22, 0x11, 0x14, 0xe3, 0x70, 0x0f, 
0x94, 0x5d, 0x4c, 0x9d, 0x9e, + 0xf8, 0x04, 0x9a, 0x95, 0xd0, 0xc6, 0xb4, 0x75, 0x8c, 0x22, 0x0c, 0x1e, 0xa5, 0xfb, 0xa2, 0xc4, + 0x29, 0x7d, 0x7d, 0xbe, 0x2f, 0x76, 0xd2, 0x35, 0xe6, 0x74, 0x86, 0x7a, 0x25, 0x81, 0xed, 0xb4, + 0xce, 0x12, 0x08, 0x7d, 0x9a, 0x25, 0x74, 0x7f, 0x21, 0x42, 0xd3, 0x39, 0xe6, 0x53, 0x6a, 0xb4, + 0xae, 0xae, 0x95, 0x95, 0xe7, 0xd7, 0xca, 0xca, 0x8b, 0x6b, 0x65, 0xe5, 0x87, 0x50, 0x91, 0xae, + 0x42, 0x45, 0x7a, 0x1e, 0x2a, 0xd2, 0x8b, 0x50, 0x91, 0xfe, 0x0c, 0x15, 0xe9, 0xa7, 0xbf, 0x94, + 0x95, 0x4f, 0xf6, 0x16, 0xf8, 0xfe, 0xff, 0x27, 0x00, 0x00, 0xff, 0xff, 0x6b, 0x4d, 0xd5, 0x11, + 0x25, 0x0c, 0x00, 0x00, +} + +func (m *ClusterNetwork) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterNetwork) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterNetwork) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MTU != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MTU)) + i-- + dAtA[i] = 0x40 + } + if m.VXLANPort != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.VXLANPort)) + i-- + dAtA[i] = 0x38 + } + if len(m.ClusterNetworks) > 0 { + for iNdEx := len(m.ClusterNetworks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ClusterNetworks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i -= len(m.PluginName) + copy(dAtA[i:], m.PluginName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PluginName))) + i-- + dAtA[i] = 0x2a + i -= len(m.ServiceNetwork) + copy(dAtA[i:], m.ServiceNetwork) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceNetwork))) + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.HostSubnetLength)) + i-- + dAtA[i] = 0x18 + i -= len(m.Network) + copy(dAtA[i:], m.Network) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Network))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterNetworkEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterNetworkEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterNetworkEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.HostSubnetLength)) + i-- + dAtA[i] = 0x10 + i -= len(m.CIDR) + copy(dAtA[i:], m.CIDR) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterNetworkList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterNetworkList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterNetworkList) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EgressNetworkPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EgressNetworkPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EgressNetworkPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EgressNetworkPolicyList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EgressNetworkPolicyList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EgressNetworkPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EgressNetworkPolicyPeer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EgressNetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EgressNetworkPolicyPeer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.DNSName) + copy(dAtA[i:], m.DNSName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DNSName))) + i-- + dAtA[i] = 0x12 + i -= len(m.CIDRSelector) + copy(dAtA[i:], m.CIDRSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDRSelector))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EgressNetworkPolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*EgressNetworkPolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EgressNetworkPolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.To.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EgressNetworkPolicySpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EgressNetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EgressNetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Egress) > 0 { + for iNdEx := len(m.Egress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Egress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *HostSubnet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HostSubnet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostSubnet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.EgressCIDRs) > 0 { + for iNdEx := len(m.EgressCIDRs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.EgressCIDRs[iNdEx]) + copy(dAtA[i:], m.EgressCIDRs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EgressCIDRs[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.EgressIPs) > 0 { + for iNdEx := len(m.EgressIPs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.EgressIPs[iNdEx]) + copy(dAtA[i:], m.EgressIPs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EgressIPs[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + i -= len(m.Subnet) + copy(dAtA[i:], m.Subnet) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subnet))) + i-- + dAtA[i] = 0x22 + i -= len(m.HostIP) + copy(dAtA[i:], m.HostIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) + i-- + dAtA[i] = 0x1a + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HostSubnetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HostSubnetList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostSubnetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 
1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NetNamespace) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetNamespace) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetNamespace) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.EgressIPs) > 0 { + for iNdEx := len(m.EgressIPs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.EgressIPs[iNdEx]) + copy(dAtA[i:], m.EgressIPs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EgressIPs[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.NetID)) + i-- + dAtA[i] = 0x18 + i -= len(m.NetName) + copy(dAtA[i:], m.NetName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NetName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NetNamespaceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetNamespaceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetNamespaceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ClusterNetwork) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Network) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.HostSubnetLength)) + l = len(m.ServiceNetwork) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PluginName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ClusterNetworks) > 0 { + for _, e := range m.ClusterNetworks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.VXLANPort != nil { + n += 1 + sovGenerated(uint64(*m.VXLANPort)) + } + if m.MTU != nil { + n += 1 + sovGenerated(uint64(*m.MTU)) + } + return n +} + +func (m 
*ClusterNetworkEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CIDR) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.HostSubnetLength)) + return n +} + +func (m *ClusterNetworkList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EgressNetworkPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EgressNetworkPolicyList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EgressNetworkPolicyPeer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CIDRSelector) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DNSName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EgressNetworkPolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = m.To.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EgressNetworkPolicySpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Egress) > 0 { + for _, e := range m.Egress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HostSubnet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.HostIP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Subnet) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.EgressIPs) > 0 { + for _, s := range m.EgressIPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.EgressCIDRs) > 0 { + for _, s := range m.EgressCIDRs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HostSubnetList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetNamespace) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NetName) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.NetID)) + if len(m.EgressIPs) > 0 { + for _, s := range m.EgressIPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetNamespaceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ClusterNetwork) 
String() string { + if this == nil { + return "nil" + } + repeatedStringForClusterNetworks := "[]ClusterNetworkEntry{" + for _, f := range this.ClusterNetworks { + repeatedStringForClusterNetworks += strings.Replace(strings.Replace(f.String(), "ClusterNetworkEntry", "ClusterNetworkEntry", 1), `&`, ``, 1) + "," + } + repeatedStringForClusterNetworks += "}" + s := strings.Join([]string{`&ClusterNetwork{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Network:` + fmt.Sprintf("%v", this.Network) + `,`, + `HostSubnetLength:` + fmt.Sprintf("%v", this.HostSubnetLength) + `,`, + `ServiceNetwork:` + fmt.Sprintf("%v", this.ServiceNetwork) + `,`, + `PluginName:` + fmt.Sprintf("%v", this.PluginName) + `,`, + `ClusterNetworks:` + repeatedStringForClusterNetworks + `,`, + `VXLANPort:` + valueToStringGenerated(this.VXLANPort) + `,`, + `MTU:` + valueToStringGenerated(this.MTU) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterNetworkEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterNetworkEntry{`, + `CIDR:` + fmt.Sprintf("%v", this.CIDR) + `,`, + `HostSubnetLength:` + fmt.Sprintf("%v", this.HostSubnetLength) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterNetworkList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ClusterNetwork{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterNetwork", "ClusterNetwork", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterNetworkList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *EgressNetworkPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EgressNetworkPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "EgressNetworkPolicySpec", "EgressNetworkPolicySpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *EgressNetworkPolicyList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]EgressNetworkPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "EgressNetworkPolicy", "EgressNetworkPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&EgressNetworkPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *EgressNetworkPolicyPeer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EgressNetworkPolicyPeer{`, + `CIDRSelector:` + fmt.Sprintf("%v", this.CIDRSelector) + `,`, + `DNSName:` + fmt.Sprintf("%v", this.DNSName) + `,`, + `}`, + }, "") + return s +} +func (this *EgressNetworkPolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EgressNetworkPolicyRule{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `To:` + strings.Replace(strings.Replace(this.To.String(), 
"EgressNetworkPolicyPeer", "EgressNetworkPolicyPeer", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *EgressNetworkPolicySpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForEgress := "[]EgressNetworkPolicyRule{" + for _, f := range this.Egress { + repeatedStringForEgress += strings.Replace(strings.Replace(f.String(), "EgressNetworkPolicyRule", "EgressNetworkPolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForEgress += "}" + s := strings.Join([]string{`&EgressNetworkPolicySpec{`, + `Egress:` + repeatedStringForEgress + `,`, + `}`, + }, "") + return s +} +func (this *HostSubnet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HostSubnet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, + `Subnet:` + fmt.Sprintf("%v", this.Subnet) + `,`, + `EgressIPs:` + fmt.Sprintf("%v", this.EgressIPs) + `,`, + `EgressCIDRs:` + fmt.Sprintf("%v", this.EgressCIDRs) + `,`, + `}`, + }, "") + return s +} +func (this *HostSubnetList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]HostSubnet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "HostSubnet", "HostSubnet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&HostSubnetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *NetNamespace) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetNamespace{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `NetName:` + fmt.Sprintf("%v", this.NetName) + `,`, + `NetID:` + fmt.Sprintf("%v", this.NetID) + `,`, + `EgressIPs:` + fmt.Sprintf("%v", this.EgressIPs) + `,`, + `}`, + }, "") + return s +} +func (this *NetNamespaceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]NetNamespace{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetNamespace", "NetNamespace", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&NetNamespaceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ClusterNetwork) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterNetwork: wiretype end group for non-group") + } + if 
fieldNum <= 0 { + return fmt.Errorf("proto: ClusterNetwork: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Network = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostSubnetLength", wireType) + } + m.HostSubnetLength = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HostSubnetLength |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceNetwork", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceNetwork = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PluginName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PluginName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterNetworks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterNetworks = append(m.ClusterNetworks, ClusterNetworkEntry{}) + if err := m.ClusterNetworks[len(m.ClusterNetworks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field VXLANPort", wireType) + } + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.VXLANPort = &v + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MTU", wireType) + } + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MTU = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterNetworkEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterNetworkEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterNetworkEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CIDR = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostSubnetLength", wireType) + } + m.HostSubnetLength = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HostSubnetLength |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err 
+ } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterNetworkList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterNetworkList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterNetworkList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterNetwork{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EgressNetworkPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EgressNetworkPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EgressNetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EgressNetworkPolicyList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EgressNetworkPolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EgressNetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, 
EgressNetworkPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EgressNetworkPolicyPeer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EgressNetworkPolicyPeer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EgressNetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CIDRSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CIDRSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DNSName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EgressNetworkPolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EgressNetworkPolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: EgressNetworkPolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = EgressNetworkPolicyRuleType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EgressNetworkPolicySpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EgressNetworkPolicySpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EgressNetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Egress = append(m.Egress, EgressNetworkPolicyRule{}) + if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + 
iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HostSubnet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HostSubnet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HostSubnet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostIP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subnet", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subnet = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field EgressIPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EgressIPs = append(m.EgressIPs, HostSubnetEgressIP(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EgressCIDRs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EgressCIDRs = append(m.EgressCIDRs, HostSubnetEgressCIDR(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HostSubnetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HostSubnetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HostSubnetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, HostSubnet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetNamespace) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetNamespace: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetNamespace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NetName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NetID", wireType) + } + m.NetID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NetID |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EgressIPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EgressIPs = append(m.EgressIPs, NetNamespaceEgressIP(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetNamespaceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetNamespaceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetNamespaceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, NetNamespace{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/openshift/api/network/v1/generated.proto b/vendor/github.com/openshift/api/network/v1/generated.proto new file mode 100644 index 000000000..213de6cf5 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/generated.proto @@ -0,0 +1,243 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package github.com.openshift.api.network.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/openshift/api/network/v1"; + +// ClusterNetwork describes the cluster network. There is normally only one object of this type, +// named "default", which is created by the SDN network plugin based on the master configuration +// when the cluster is brought up for the first time. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +kubebuilder:resource:scope="Cluster" +// +kubebuilder:printcolumn:name="Cluster Network",type=string,JSONPath=`.network`,description="The primary cluster network CIDR" +// +kubebuilder:printcolumn:name="Service Network",type=string,JSONPath=`.serviceNetwork`,description="The service network CIDR" +// +kubebuilder:printcolumn:name="Plugin Name",type=string,JSONPath=`.pluginName`,description="The Openshift SDN network plug-in in use" +// +openshift:compatibility-gen:level=1 +message ClusterNetwork { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Network is a CIDR string specifying the global overlay network's L3 space + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + optional string network = 2; + + // HostSubnetLength is the number of bits of network to allocate to each node. 
eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods + // +kubebuilder:validation:Minimum=2 + // +kubebuilder:validation:Maximum=30 + optional uint32 hostsubnetlength = 3; + + // ServiceNetwork is the CIDR range that Service IP addresses are allocated from + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + optional string serviceNetwork = 4; + + // PluginName is the name of the network plugin being used + optional string pluginName = 5; + + // ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from. + repeated ClusterNetworkEntry clusterNetworks = 6; + + // VXLANPort sets the VXLAN destination port used by the cluster. + // It is set by the master configuration file on startup and cannot be edited manually. + // Valid values for VXLANPort are integers 1-65535 inclusive and if unset defaults to 4789. + // Changing VXLANPort allows users to resolve issues between openshift SDN and other software trying to use the same VXLAN destination port. + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // +kubebuilder:validation:Optional + // +optional + optional uint32 vxlanPort = 7; + + // MTU is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator. + // +kubebuilder:validation:Minimum=576 + // +kubebuilder:validation:Maximum=65536 + // +kubebuilder:validation:Optional + // +optional + optional uint32 mtu = 8; +} + +// ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips. +message ClusterNetworkEntry { + // CIDR defines the total range of a cluster networks address space. + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + optional string cidr = 1; + + // HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods. + // +kubebuilder:validation:Minimum=2 + // +kubebuilder:validation:Maximum=30 + optional uint32 hostSubnetLength = 2; +} + +// ClusterNetworkList is a collection of ClusterNetworks +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ClusterNetworkList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of cluster networks + repeated ClusterNetwork items = 2; +} + +// EgressNetworkPolicy describes the current egress network policy for a Namespace. When using +// the 'redhat/openshift-ovs-multitenant' network plugin, traffic from a pod to an IP address +// outside the cluster will be checked against each EgressNetworkPolicyRule in the pod's +// namespace's EgressNetworkPolicy, in order. 
If no rule matches (or no EgressNetworkPolicy +// is present) then the traffic will be allowed by default. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message EgressNetworkPolicy { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the specification of the current egress network policy + optional EgressNetworkPolicySpec spec = 2; +} + +// EgressNetworkPolicyList is a collection of EgressNetworkPolicy +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message EgressNetworkPolicyList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of policies + repeated EgressNetworkPolicy items = 2; +} + +// EgressNetworkPolicyPeer specifies a target to apply egress network policy to +message EgressNetworkPolicyPeer { + // CIDRSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset + // Ideally we would have liked to use the cidr openapi format for this property. + // But openshift-sdn only supports v4 while specifying the cidr format allows both v4 and v6 cidrs + // We are therefore using a regex pattern to validate instead. + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + optional string cidrSelector = 1; + + // DNSName is the domain name to allow/deny traffic to. If this is set, cidrSelector must be unset + // +kubebuilder:validation:Pattern=`^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$` + optional string dnsName = 2; +} + +// EgressNetworkPolicyRule contains a single egress network policy rule +message EgressNetworkPolicyRule { + // type marks this as an "Allow" or "Deny" rule + optional string type = 1; + + // to is the target that traffic is allowed/denied to + optional EgressNetworkPolicyPeer to = 2; +} + +// EgressNetworkPolicySpec provides a list of policies on outgoing network traffic +message EgressNetworkPolicySpec { + // egress contains the list of egress policy rules + repeated EgressNetworkPolicyRule egress = 1; +} + +// HostSubnet describes the container subnet network on a node. The HostSubnet object must have the +// same name as the Node object it corresponds to. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
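[Editor's note] The first-match semantics described in the EgressNetworkPolicy comment above (rules checked in order, unmatched traffic allowed) can be made concrete with a small sketch. This is an illustrative model only, not openshift-sdn's actual data path: it handles only CIDR peers and skips dnsName rules, and `egressAllowed` is a made-up helper name.

```go
package main

import (
	"fmt"
	"net"

	networkv1 "github.com/openshift/api/network/v1"
)

// egressAllowed walks the rules in order and returns the verdict of the
// first matching rule; if nothing matches, traffic is allowed, as the
// documentation above describes. Only CIDR peers are modeled here --
// dnsName rules are skipped for brevity.
func egressAllowed(rules []networkv1.EgressNetworkPolicyRule, ip net.IP) bool {
	for _, r := range rules {
		if r.To.CIDRSelector == "" {
			continue // dnsName peer; out of scope for this sketch
		}
		_, cidr, err := net.ParseCIDR(r.To.CIDRSelector)
		if err != nil || !cidr.Contains(ip) {
			continue
		}
		return r.Type == networkv1.EgressNetworkPolicyRuleAllow
	}
	return true // default allow when no rule matches
}

func main() {
	rules := []networkv1.EgressNetworkPolicyRule{
		{Type: networkv1.EgressNetworkPolicyRuleDeny,
			To: networkv1.EgressNetworkPolicyPeer{CIDRSelector: "10.0.0.0/8"}},
	}
	fmt.Println(egressAllowed(rules, net.ParseIP("10.1.2.3"))) // false
	fmt.Println(egressAllowed(rules, net.ParseIP("8.8.8.8")))  // true
}
```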
+// +kubebuilder:printcolumn:name="Host",type=string,JSONPath=`.host`,description="The name of the node" +// +kubebuilder:printcolumn:name="Host IP",type=string,JSONPath=`.hostIP`,description="The IP address to be used as a VTEP by other nodes in the overlay network" +// +kubebuilder:printcolumn:name="Subnet",type=string,JSONPath=`.subnet`,description="The CIDR range of the overlay network assigned to the node for its pods" +// +kubebuilder:printcolumn:name="Egress CIDRs",type=string,JSONPath=`.egressCIDRs`,description="The network egress CIDRs" +// +kubebuilder:printcolumn:name="Egress IPs",type=string,JSONPath=`.egressIPs`,description="The network egress IP addresses" +// +openshift:compatibility-gen:level=1 +message HostSubnet { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Host is the name of the node. (This is the same as the object's name, but both fields must be set.) + // +kubebuilder:validation:Pattern=`^[a-z0-9.-]+$` + optional string host = 2; + + // HostIP is the IP address to be used as a VTEP by other nodes in the overlay network + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$` + optional string hostIP = 3; + + // Subnet is the CIDR range of the overlay network assigned to the node for its pods + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + optional string subnet = 4; + + // EgressIPs is the list of automatic egress IP addresses currently hosted by this node. + // If EgressCIDRs is empty, this can be set by hand; if EgressCIDRs is set then the + // master will overwrite the value here with its own allocation of egress IPs. + // +optional + repeated string egressIPs = 5; + + // EgressCIDRs is the list of CIDR ranges available for automatically assigning + // egress IPs to this node from. If this field is set then EgressIPs should be + // treated as read-only. + // +optional + repeated string egressCIDRs = 6; +} + +// HostSubnetList is a collection of HostSubnets +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message HostSubnetList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of host subnets + repeated HostSubnet items = 2; +} + +// NetNamespace describes a single isolated network. When using the redhat/openshift-ovs-multitenant +// plugin, every Namespace will have a corresponding NetNamespace object with the same name. +// (When using redhat/openshift-ovs-subnet, NetNamespaces are not used.) +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
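[Editor's note] These proto messages are what the generated Unmarshal functions in generated.pb.go earlier in this patch decode. The loop that repeats throughout that file -- read a varint tag, split it into field number and wire type, then consume a length-delimited payload -- distills to roughly the following standalone sketch; the generated code remains authoritative.

```go
package main

import (
	"errors"
	"fmt"
)

// readUvarint mirrors the inlined loop in the generated Unmarshal functions:
// 7 payload bits per byte, high bit set means "more bytes follow".
func readUvarint(data []byte, i int) (v uint64, next int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow")
		}
		if i >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return v, i, nil
		}
	}
}

func main() {
	// Field 2, wire type 2 (length-delimited), payload "hi":
	// tag = (2 << 3) | 2 = 0x12, then the length, then the bytes.
	msg := []byte{0x12, 0x02, 'h', 'i'}
	wire, i, _ := readUvarint(msg, 0)
	fieldNum := int32(wire >> 3)
	wireType := int(wire & 0x7)
	length, i, _ := readUvarint(msg, i)
	fmt.Printf("field %d, wire type %d, payload %q\n",
		fieldNum, wireType, msg[i:i+int(length)])
	// Output: field 2, wire type 2, payload "hi"
}
```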
+// +kubebuilder:printcolumn:name="NetID",type=integer,JSONPath=`.netid`,description="The network identifier of the network namespace" +// +kubebuilder:printcolumn:name="Egress IPs",type=string,JSONPath=`.egressIPs`,description="The network egress IP addresses" +// +openshift:compatibility-gen:level=1 +message NetNamespace { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // NetName is the name of the network namespace. (This is the same as the object's name, but both fields must be set.) + // +kubebuilder:validation:Pattern=`^[a-z0-9.-]+$` + optional string netname = 2; + + // NetID is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the "oc adm pod-network" commands. + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=16777215 + optional uint32 netid = 3; + + // EgressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace. + // (If empty, external traffic will be masqueraded to Node IPs.) + // +optional + repeated string egressIPs = 4; +} + +// NetNamespaceList is a collection of NetNamespaces +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message NetNamespaceList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of net namespaces + repeated NetNamespace items = 2; +} + diff --git a/vendor/github.com/openshift/api/network/v1/legacy.go b/vendor/github.com/openshift/api/network/v1/legacy.go new file mode 100644 index 000000000..4395ebf8e --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/legacy.go @@ -0,0 +1,27 @@ +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &ClusterNetwork{}, + &ClusterNetworkList{}, + &HostSubnet{}, + &HostSubnetList{}, + &NetNamespace{}, + &NetNamespaceList{}, + &EgressNetworkPolicy{}, + &EgressNetworkPolicyList{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) 
+ return nil +} diff --git a/vendor/github.com/openshift/api/network/v1/register.go b/vendor/github.com/openshift/api/network/v1/register.go new file mode 100644 index 000000000..80defa764 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/register.go @@ -0,0 +1,44 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "network.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &ClusterNetwork{}, + &ClusterNetworkList{}, + &HostSubnet{}, + &HostSubnetList{}, + &NetNamespace{}, + &NetNamespaceList{}, + &EgressNetworkPolicy{}, + &EgressNetworkPolicyList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/network/v1/stable.clusternetwork.testsuite.yaml b/vendor/github.com/openshift/api/network/v1/stable.clusternetwork.testsuite.yaml new file mode 100644 index 000000000..1593231c8 --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/stable.clusternetwork.testsuite.yaml @@ -0,0 +1,16 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] ClusterNetwork" +crd: 001-clusternetwork-crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal ClusterNetwork + initial: | + apiVersion: network.openshift.io/v1 + kind: ClusterNetwork + clusterNetworks: [] + serviceNetwork: 1.2.3.4/32 + expected: | + apiVersion: network.openshift.io/v1 + kind: ClusterNetwork + clusterNetworks: [] + serviceNetwork: 1.2.3.4/32 diff --git a/vendor/github.com/openshift/api/network/v1/stable.egressnetworkpolicy.testsuite.yaml b/vendor/github.com/openshift/api/network/v1/stable.egressnetworkpolicy.testsuite.yaml new file mode 100644 index 000000000..6ae75505f --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/stable.egressnetworkpolicy.testsuite.yaml @@ -0,0 +1,16 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] EgressNetworkPolicy" +crd: 004-egressnetworkpolicy-crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal EgressNetworkPolicy + initial: | + apiVersion: network.openshift.io/v1 + kind: EgressNetworkPolicy + spec: + egress: [] + expected: | + apiVersion: network.openshift.io/v1 + kind: EgressNetworkPolicy + spec: + egress: [] diff --git a/vendor/github.com/openshift/api/network/v1/stable.hostsubnet.testsuite.yaml b/vendor/github.com/openshift/api/network/v1/stable.hostsubnet.testsuite.yaml new file mode 100644 index 000000000..4740019da --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/stable.hostsubnet.testsuite.yaml 
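[Editor's note] The register.go added above is the entry point consumers use to teach a runtime.Scheme about these kinds. A minimal usage sketch, assuming this vendored package path:

```go
package main

import (
	"fmt"

	networkv1 "github.com/openshift/api/network/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// Install registers all eight kinds under network.openshift.io/v1.
	if err := networkv1.Install(scheme); err != nil {
		panic(err)
	}
	// The scheme can now map a Go object back to its group/version/kind.
	gvks, _, err := scheme.ObjectKinds(&networkv1.NetNamespace{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks) // [network.openshift.io/v1, Kind=NetNamespace]
}
```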
@@ -0,0 +1,18 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] HostSubnet" +crd: 002-hostsubnet-crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal HostSubnet + initial: | + apiVersion: network.openshift.io/v1 + kind: HostSubnet + host: foo + hostIP: 1.2.3.4 + subnet: 1.2.3.0/24 + expected: | + apiVersion: network.openshift.io/v1 + kind: HostSubnet + host: foo + hostIP: 1.2.3.4 + subnet: 1.2.3.0/24 diff --git a/vendor/github.com/openshift/api/network/v1/stable.netnamespace.testsuite.yaml b/vendor/github.com/openshift/api/network/v1/stable.netnamespace.testsuite.yaml new file mode 100644 index 000000000..887ce749b --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/stable.netnamespace.testsuite.yaml @@ -0,0 +1,16 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] NetNamespace" +crd: 003-netnamespace-crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal NetNamespace + initial: | + apiVersion: network.openshift.io/v1 + kind: NetNamespace + netname: foo + netid: 0 + expected: | + apiVersion: network.openshift.io/v1 + kind: NetNamespace + netname: foo + netid: 0 diff --git a/vendor/github.com/openshift/api/network/v1/types.go b/vendor/github.com/openshift/api/network/v1/types.go new file mode 100644 index 000000000..e71c6cf5a --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/types.go @@ -0,0 +1,300 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + ClusterNetworkDefault = "default" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterNetwork describes the cluster network. There is normally only one object of this type, +// named "default", which is created by the SDN network plugin based on the master configuration +// when the cluster is brought up for the first time. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +kubebuilder:resource:scope="Cluster" +// +kubebuilder:printcolumn:name="Cluster Network",type=string,JSONPath=`.network`,description="The primary cluster network CIDR" +// +kubebuilder:printcolumn:name="Service Network",type=string,JSONPath=`.serviceNetwork`,description="The service network CIDR" +// +kubebuilder:printcolumn:name="Plugin Name",type=string,JSONPath=`.pluginName`,description="The Openshift SDN network plug-in in use" +// +openshift:compatibility-gen:level=1 +type ClusterNetwork struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Network is a CIDR string specifying the global overlay network's L3 space + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + Network string `json:"network,omitempty" protobuf:"bytes,2,opt,name=network"` + + // HostSubnetLength is the number of bits of network to allocate to each node. 
eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods + // +kubebuilder:validation:Minimum=2 + // +kubebuilder:validation:Maximum=30 + HostSubnetLength uint32 `json:"hostsubnetlength,omitempty" protobuf:"varint,3,opt,name=hostsubnetlength"` + + // ServiceNetwork is the CIDR range that Service IP addresses are allocated from + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + ServiceNetwork string `json:"serviceNetwork" protobuf:"bytes,4,opt,name=serviceNetwork"` + + // PluginName is the name of the network plugin being used + PluginName string `json:"pluginName,omitempty" protobuf:"bytes,5,opt,name=pluginName"` + + // ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from. + ClusterNetworks []ClusterNetworkEntry `json:"clusterNetworks" protobuf:"bytes,6,rep,name=clusterNetworks"` + + // VXLANPort sets the VXLAN destination port used by the cluster. + // It is set by the master configuration file on startup and cannot be edited manually. + // Valid values for VXLANPort are integers 1-65535 inclusive and if unset defaults to 4789. + // Changing VXLANPort allows users to resolve issues between openshift SDN and other software trying to use the same VXLAN destination port. + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:validation:Maximum=65535 + // +kubebuilder:validation:Optional + // +optional + VXLANPort *uint32 `json:"vxlanPort,omitempty" protobuf:"varint,7,opt,name=vxlanPort"` + + // MTU is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator. + // +kubebuilder:validation:Minimum=576 + // +kubebuilder:validation:Maximum=65536 + // +kubebuilder:validation:Optional + // +optional + MTU *uint32 `json:"mtu,omitempty" protobuf:"varint,8,opt,name=mtu"` +} + +// ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips. +type ClusterNetworkEntry struct { + // CIDR defines the total range of a cluster networks address space. + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + CIDR string `json:"CIDR" protobuf:"bytes,1,opt,name=cidr"` + + // HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods. + // +kubebuilder:validation:Minimum=2 + // +kubebuilder:validation:Maximum=30 + HostSubnetLength uint32 `json:"hostSubnetLength" protobuf:"varint,2,opt,name=hostSubnetLength"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterNetworkList is a collection of ClusterNetworks +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ClusterNetworkList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of cluster networks + Items []ClusterNetwork `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// HostSubnetEgressIP represents one egress IP address currently hosted on the node represented by +// HostSubnet +// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$` +type HostSubnetEgressIP string + +// HostSubnetEgressCIDR represents one egress CIDR from which to assign IP addresses for this node +// represented by the HostSubnet +// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` +type HostSubnetEgressCIDR string + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HostSubnet describes the container subnet network on a node. The HostSubnet object must have the +// same name as the Node object it corresponds to. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +kubebuilder:printcolumn:name="Host",type=string,JSONPath=`.host`,description="The name of the node" +// +kubebuilder:printcolumn:name="Host IP",type=string,JSONPath=`.hostIP`,description="The IP address to be used as a VTEP by other nodes in the overlay network" +// +kubebuilder:printcolumn:name="Subnet",type=string,JSONPath=`.subnet`,description="The CIDR range of the overlay network assigned to the node for its pods" +// +kubebuilder:printcolumn:name="Egress CIDRs",type=string,JSONPath=`.egressCIDRs`,description="The network egress CIDRs" +// +kubebuilder:printcolumn:name="Egress IPs",type=string,JSONPath=`.egressIPs`,description="The network egress IP addresses" +// +openshift:compatibility-gen:level=1 +type HostSubnet struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Host is the name of the node. (This is the same as the object's name, but both fields must be set.) + // +kubebuilder:validation:Pattern=`^[a-z0-9.-]+$` + Host string `json:"host" protobuf:"bytes,2,opt,name=host"` + + // HostIP is the IP address to be used as a VTEP by other nodes in the overlay network + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$` + HostIP string `json:"hostIP" protobuf:"bytes,3,opt,name=hostIP"` + + // Subnet is the CIDR range of the overlay network assigned to the node for its pods + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + Subnet string `json:"subnet" protobuf:"bytes,4,opt,name=subnet"` + + // EgressIPs is the list of automatic egress IP addresses currently hosted by this node. + // If EgressCIDRs is empty, this can be set by hand; if EgressCIDRs is set then the + // master will overwrite the value here with its own allocation of egress IPs. 
+ // +optional + EgressIPs []HostSubnetEgressIP `json:"egressIPs,omitempty" protobuf:"bytes,5,rep,name=egressIPs"` + + // EgressCIDRs is the list of CIDR ranges available for automatically assigning + // egress IPs to this node from. If this field is set then EgressIPs should be + // treated as read-only. + // +optional + EgressCIDRs []HostSubnetEgressCIDR `json:"egressCIDRs,omitempty" protobuf:"bytes,6,rep,name=egressCIDRs"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HostSubnetList is a collection of HostSubnets +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type HostSubnetList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of host subnets + Items []HostSubnet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// NetNamespaceEgressIP is a single egress IP out of a list of reserved IPs used as source of external traffic coming +// from pods in this namespace +// +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$` +type NetNamespaceEgressIP string + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NetNamespace describes a single isolated network. When using the redhat/openshift-ovs-multitenant +// plugin, every Namespace will have a corresponding NetNamespace object with the same name. +// (When using redhat/openshift-ovs-subnet, NetNamespaces are not used.) +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +kubebuilder:printcolumn:name="NetID",type=integer,JSONPath=`.netid`,description="The network identifier of the network namespace" +// +kubebuilder:printcolumn:name="Egress IPs",type=string,JSONPath=`.egressIPs`,description="The network egress IP addresses" +// +openshift:compatibility-gen:level=1 +type NetNamespace struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // NetName is the name of the network namespace. (This is the same as the object's name, but both fields must be set.) + // +kubebuilder:validation:Pattern=`^[a-z0-9.-]+$` + NetName string `json:"netname" protobuf:"bytes,2,opt,name=netname"` + + // NetID is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the "oc adm pod-network" commands. + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=16777215 + NetID uint32 `json:"netid" protobuf:"varint,3,opt,name=netid"` + + // EgressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace. + // (If empty, external traffic will be masqueraded to Node IPs.) 
+ // +optional + EgressIPs []NetNamespaceEgressIP `json:"egressIPs,omitempty" protobuf:"bytes,4,rep,name=egressIPs"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NetNamespaceList is a collection of NetNamespaces +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type NetNamespaceList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of net namespaces + Items []NetNamespace `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// EgressNetworkPolicyRuleType indicates whether an EgressNetworkPolicyRule allows or denies traffic +// +kubebuilder:validation:Pattern=`^Allow|Deny$` +type EgressNetworkPolicyRuleType string + +const ( + EgressNetworkPolicyRuleAllow EgressNetworkPolicyRuleType = "Allow" + EgressNetworkPolicyRuleDeny EgressNetworkPolicyRuleType = "Deny" +) + +// EgressNetworkPolicyPeer specifies a target to apply egress network policy to +type EgressNetworkPolicyPeer struct { + // CIDRSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset + // Ideally we would have liked to use the cidr openapi format for this property. + // But openshift-sdn only supports v4 while specifying the cidr format allows both v4 and v6 cidrs + // We are therefore using a regex pattern to validate instead. + // +kubebuilder:validation:Pattern=`^(([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[0-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])/([0-9]|[12][0-9]|3[0-2])$` + CIDRSelector string `json:"cidrSelector,omitempty" protobuf:"bytes,1,rep,name=cidrSelector"` + // DNSName is the domain name to allow/deny traffic to. If this is set, cidrSelector must be unset + // +kubebuilder:validation:Pattern=`^([A-Za-z0-9-]+\.)*[A-Za-z0-9-]+\.?$` + DNSName string `json:"dnsName,omitempty" protobuf:"bytes,2,rep,name=dnsName"` +} + +// EgressNetworkPolicyRule contains a single egress network policy rule +type EgressNetworkPolicyRule struct { + // type marks this as an "Allow" or "Deny" rule + Type EgressNetworkPolicyRuleType `json:"type" protobuf:"bytes,1,rep,name=type"` + // to is the target that traffic is allowed/denied to + To EgressNetworkPolicyPeer `json:"to" protobuf:"bytes,2,rep,name=to"` +} + +// EgressNetworkPolicySpec provides a list of policies on outgoing network traffic +type EgressNetworkPolicySpec struct { + // egress contains the list of egress policy rules + Egress []EgressNetworkPolicyRule `json:"egress" protobuf:"bytes,1,rep,name=egress"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EgressNetworkPolicy describes the current egress network policy for a Namespace. When using +// the 'redhat/openshift-ovs-multitenant' network plugin, traffic from a pod to an IP address +// outside the cluster will be checked against each EgressNetworkPolicyRule in the pod's +// namespace's EgressNetworkPolicy, in order. If no rule matches (or no EgressNetworkPolicy +// is present) then the traffic will be allowed by default. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
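[Editor's note] With the rule types defined above, a complete policy is just an ordered allow/deny list. A sketch of building one in Go and rendering its JSON wire form via the struct tags shown here; the namespace and DNS name are made up for illustration.

```go
package main

import (
	"encoding/json"
	"fmt"

	networkv1 "github.com/openshift/api/network/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Allow one registry, deny everything else -- rule order is significant.
	policy := networkv1.EgressNetworkPolicy{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "network.openshift.io/v1",
			Kind:       "EgressNetworkPolicy",
		},
		ObjectMeta: metav1.ObjectMeta{Name: "default", Namespace: "demo"},
		Spec: networkv1.EgressNetworkPolicySpec{
			Egress: []networkv1.EgressNetworkPolicyRule{
				{
					Type: networkv1.EgressNetworkPolicyRuleAllow,
					To:   networkv1.EgressNetworkPolicyPeer{DNSName: "registry.example.com"},
				},
				{
					Type: networkv1.EgressNetworkPolicyRuleDeny,
					To:   networkv1.EgressNetworkPolicyPeer{CIDRSelector: "0.0.0.0/0"},
				},
			},
		},
	}
	out, err := json.MarshalIndent(policy, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```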
+// +openshift:compatibility-gen:level=1 +type EgressNetworkPolicy struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec is the specification of the current egress network policy + Spec EgressNetworkPolicySpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EgressNetworkPolicyList is a collection of EgressNetworkPolicy +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type EgressNetworkPolicyList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of policies + Items []EgressNetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/github.com/openshift/api/network/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/network/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..ab6eb72aa --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/zz_generated.deepcopy.go @@ -0,0 +1,347 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterNetwork) DeepCopyInto(out *ClusterNetwork) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.ClusterNetworks != nil { + in, out := &in.ClusterNetworks, &out.ClusterNetworks + *out = make([]ClusterNetworkEntry, len(*in)) + copy(*out, *in) + } + if in.VXLANPort != nil { + in, out := &in.VXLANPort, &out.VXLANPort + *out = new(uint32) + **out = **in + } + if in.MTU != nil { + in, out := &in.MTU, &out.MTU + *out = new(uint32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetwork. +func (in *ClusterNetwork) DeepCopy() *ClusterNetwork { + if in == nil { + return nil + } + out := new(ClusterNetwork) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterNetwork) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry. +func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry { + if in == nil { + return nil + } + out := new(ClusterNetworkEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
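+//
+// For illustration only (not generated code): callers normally use DeepCopy
+// when they need an independent object, for example before mutating a value
+// handed out by a shared cache; `original` and `extra` below are hypothetical:
+//
+//	copied := original.DeepCopy() // original is a *ClusterNetworkList
+//	copied.Items = append(copied.Items, extra) // safe: the cached list is untouched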
+func (in *ClusterNetworkList) DeepCopyInto(out *ClusterNetworkList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterNetwork, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkList. +func (in *ClusterNetworkList) DeepCopy() *ClusterNetworkList { + if in == nil { + return nil + } + out := new(ClusterNetworkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterNetworkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressNetworkPolicy) DeepCopyInto(out *EgressNetworkPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressNetworkPolicy. +func (in *EgressNetworkPolicy) DeepCopy() *EgressNetworkPolicy { + if in == nil { + return nil + } + out := new(EgressNetworkPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EgressNetworkPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressNetworkPolicyList) DeepCopyInto(out *EgressNetworkPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EgressNetworkPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressNetworkPolicyList. +func (in *EgressNetworkPolicyList) DeepCopy() *EgressNetworkPolicyList { + if in == nil { + return nil + } + out := new(EgressNetworkPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EgressNetworkPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressNetworkPolicyPeer) DeepCopyInto(out *EgressNetworkPolicyPeer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressNetworkPolicyPeer. +func (in *EgressNetworkPolicyPeer) DeepCopy() *EgressNetworkPolicyPeer { + if in == nil { + return nil + } + out := new(EgressNetworkPolicyPeer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
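+//
+// Editorial note (not generated): EgressNetworkPolicyRule and the
+// EgressNetworkPolicyPeer it embeds hold only string fields, so the plain
+// assignment of To below, like the copy() used for the Egress slice in
+// EgressNetworkPolicySpec, already yields a fully independent value.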
+func (in *EgressNetworkPolicyRule) DeepCopyInto(out *EgressNetworkPolicyRule) { + *out = *in + out.To = in.To + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressNetworkPolicyRule. +func (in *EgressNetworkPolicyRule) DeepCopy() *EgressNetworkPolicyRule { + if in == nil { + return nil + } + out := new(EgressNetworkPolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressNetworkPolicySpec) DeepCopyInto(out *EgressNetworkPolicySpec) { + *out = *in + if in.Egress != nil { + in, out := &in.Egress, &out.Egress + *out = make([]EgressNetworkPolicyRule, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressNetworkPolicySpec. +func (in *EgressNetworkPolicySpec) DeepCopy() *EgressNetworkPolicySpec { + if in == nil { + return nil + } + out := new(EgressNetworkPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostSubnet) DeepCopyInto(out *HostSubnet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.EgressIPs != nil { + in, out := &in.EgressIPs, &out.EgressIPs + *out = make([]HostSubnetEgressIP, len(*in)) + copy(*out, *in) + } + if in.EgressCIDRs != nil { + in, out := &in.EgressCIDRs, &out.EgressCIDRs + *out = make([]HostSubnetEgressCIDR, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostSubnet. +func (in *HostSubnet) DeepCopy() *HostSubnet { + if in == nil { + return nil + } + out := new(HostSubnet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HostSubnet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostSubnetList) DeepCopyInto(out *HostSubnetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HostSubnet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostSubnetList. +func (in *HostSubnetList) DeepCopy() *HostSubnetList { + if in == nil { + return nil + } + out := new(HostSubnetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HostSubnetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
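+//
+// For illustration only (not generated code): because these types implement
+// DeepCopyObject, generic machinery can clone them through the runtime.Object
+// interface without knowing the concrete type:
+//
+//	var obj runtime.Object = &NetNamespace{}
+//	clone := obj.DeepCopyObject().(*NetNamespace)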
+func (in *NetNamespace) DeepCopyInto(out *NetNamespace) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.EgressIPs != nil { + in, out := &in.EgressIPs, &out.EgressIPs + *out = make([]NetNamespaceEgressIP, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetNamespace. +func (in *NetNamespace) DeepCopy() *NetNamespace { + if in == nil { + return nil + } + out := new(NetNamespace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetNamespace) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetNamespaceList) DeepCopyInto(out *NetNamespaceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NetNamespace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetNamespaceList. +func (in *NetNamespaceList) DeepCopy() *NetNamespaceList { + if in == nil { + return nil + } + out := new(NetNamespaceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetNamespaceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/github.com/openshift/api/network/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/network/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..f92172aca --- /dev/null +++ b/vendor/github.com/openshift/api/network/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,145 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_ClusterNetwork = map[string]string{ + "": "ClusterNetwork describes the cluster network. There is normally only one object of this type, named \"default\", which is created by the SDN network plugin based on the master configuration when the cluster is brought up for the first time.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "network": "Network is a CIDR string specifying the global overlay network's L3 space", + "hostsubnetlength": "HostSubnetLength is the number of bits of network to allocate to each node. 
eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods", + "serviceNetwork": "ServiceNetwork is the CIDR range that Service IP addresses are allocated from", + "pluginName": "PluginName is the name of the network plugin being used", + "clusterNetworks": "ClusterNetworks is a list of ClusterNetwork objects that defines the global overlay network's L3 space by specifying a set of CIDR and netmasks that the SDN can allocate addresses from.", + "vxlanPort": "VXLANPort sets the VXLAN destination port used by the cluster. It is set by the master configuration file on startup and cannot be edited manually. Valid values for VXLANPort are integers 1-65535 inclusive and if unset defaults to 4789. Changing VXLANPort allows users to resolve issues between openshift SDN and other software trying to use the same VXLAN destination port.", + "mtu": "MTU is the MTU for the overlay network. This should be 50 less than the MTU of the network connecting the nodes. It is normally autodetected by the cluster network operator.", +} + +func (ClusterNetwork) SwaggerDoc() map[string]string { + return map_ClusterNetwork +} + +var map_ClusterNetworkEntry = map[string]string{ + "": "ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips.", + "CIDR": "CIDR defines the total range of a cluster networks address space.", + "hostSubnetLength": "HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pods.", +} + +func (ClusterNetworkEntry) SwaggerDoc() map[string]string { + return map_ClusterNetworkEntry +} + +var map_ClusterNetworkList = map[string]string{ + "": "ClusterNetworkList is a collection of ClusterNetworks\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is the list of cluster networks", +} + +func (ClusterNetworkList) SwaggerDoc() map[string]string { + return map_ClusterNetworkList +} + +var map_EgressNetworkPolicy = map[string]string{ + "": "EgressNetworkPolicy describes the current egress network policy for a Namespace. When using the 'redhat/openshift-ovs-multitenant' network plugin, traffic from a pod to an IP address outside the cluster will be checked against each EgressNetworkPolicyRule in the pod's namespace's EgressNetworkPolicy, in order. If no rule matches (or no EgressNetworkPolicy is present) then the traffic will be allowed by default.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the current egress network policy", +} + +func (EgressNetworkPolicy) SwaggerDoc() map[string]string { + return map_EgressNetworkPolicy +} + +var map_EgressNetworkPolicyList = map[string]string{ + "": "EgressNetworkPolicyList is a collection of EgressNetworkPolicy\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of policies", +} + +func (EgressNetworkPolicyList) SwaggerDoc() map[string]string { + return map_EgressNetworkPolicyList +} + +var map_EgressNetworkPolicyPeer = map[string]string{ + "": "EgressNetworkPolicyPeer specifies a target to apply egress network policy to", + "cidrSelector": "CIDRSelector is the CIDR range to allow/deny traffic to. If this is set, dnsName must be unset Ideally we would have liked to use the cidr openapi format for this property. But openshift-sdn only supports v4 while specifying the cidr format allows both v4 and v6 cidrs We are therefore using a regex pattern to validate instead.", + "dnsName": "DNSName is the domain name to allow/deny traffic to. If this is set, cidrSelector must be unset", +} + +func (EgressNetworkPolicyPeer) SwaggerDoc() map[string]string { + return map_EgressNetworkPolicyPeer +} + +var map_EgressNetworkPolicyRule = map[string]string{ + "": "EgressNetworkPolicyRule contains a single egress network policy rule", + "type": "type marks this as an \"Allow\" or \"Deny\" rule", + "to": "to is the target that traffic is allowed/denied to", +} + +func (EgressNetworkPolicyRule) SwaggerDoc() map[string]string { + return map_EgressNetworkPolicyRule +} + +var map_EgressNetworkPolicySpec = map[string]string{ + "": "EgressNetworkPolicySpec provides a list of policies on outgoing network traffic", + "egress": "egress contains the list of egress policy rules", +} + +func (EgressNetworkPolicySpec) SwaggerDoc() map[string]string { + return map_EgressNetworkPolicySpec +} + +var map_HostSubnet = map[string]string{ + "": "HostSubnet describes the container subnet network on a node. The HostSubnet object must have the same name as the Node object it corresponds to.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "host": "Host is the name of the node. (This is the same as the object's name, but both fields must be set.)", + "hostIP": "HostIP is the IP address to be used as a VTEP by other nodes in the overlay network", + "subnet": "Subnet is the CIDR range of the overlay network assigned to the node for its pods", + "egressIPs": "EgressIPs is the list of automatic egress IP addresses currently hosted by this node. If EgressCIDRs is empty, this can be set by hand; if EgressCIDRs is set then the master will overwrite the value here with its own allocation of egress IPs.", + "egressCIDRs": "EgressCIDRs is the list of CIDR ranges available for automatically assigning egress IPs to this node from. 
If this field is set then EgressIPs should be treated as read-only.", +} + +func (HostSubnet) SwaggerDoc() map[string]string { + return map_HostSubnet +} + +var map_HostSubnetList = map[string]string{ + "": "HostSubnetList is a collection of HostSubnets\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is the list of host subnets", +} + +func (HostSubnetList) SwaggerDoc() map[string]string { + return map_HostSubnetList +} + +var map_NetNamespace = map[string]string{ + "": "NetNamespace describes a single isolated network. When using the redhat/openshift-ovs-multitenant plugin, every Namespace will have a corresponding NetNamespace object with the same name. (When using redhat/openshift-ovs-subnet, NetNamespaces are not used.)\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "netname": "NetName is the name of the network namespace. (This is the same as the object's name, but both fields must be set.)", + "netid": "NetID is the network identifier of the network namespace assigned to each overlay network packet. This can be manipulated with the \"oc adm pod-network\" commands.", + "egressIPs": "EgressIPs is a list of reserved IPs that will be used as the source for external traffic coming from pods in this namespace. (If empty, external traffic will be masqueraded to Node IPs.)", +} + +func (NetNamespace) SwaggerDoc() map[string]string { + return map_NetNamespace +} + +var map_NetNamespaceList = map[string]string{ + "": "NetNamespaceList is a collection of NetNamespaces\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is the list of net namespaces", +} + +func (NetNamespaceList) SwaggerDoc() map[string]string { + return map_NetNamespaceList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/networkoperator/.codegen.yaml b/vendor/github.com/openshift/api/networkoperator/.codegen.yaml new file mode 100644 index 000000000..ffa2c8d9b --- /dev/null +++ b/vendor/github.com/openshift/api/networkoperator/.codegen.yaml @@ -0,0 +1,2 @@ +swaggerdocs: + commentPolicy: Warn diff --git a/vendor/github.com/openshift/api/networkoperator/OWNERS b/vendor/github.com/openshift/api/networkoperator/OWNERS new file mode 100644 index 000000000..6148b9f77 --- /dev/null +++ b/vendor/github.com/openshift/api/networkoperator/OWNERS @@ -0,0 +1,5 @@ +reviewers: + - danwinship + - dcbw + - knobunc + - squeed diff --git a/vendor/github.com/openshift/api/networkoperator/install.go b/vendor/github.com/openshift/api/networkoperator/install.go new file mode 100644 index 000000000..b06383bf4 --- /dev/null +++ b/vendor/github.com/openshift/api/networkoperator/install.go @@ -0,0 +1,26 @@ +package networkoperator + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + networkoperatorv1 "github.com/openshift/api/networkoperator/v1" +) + +const ( + GroupName = "network.operator.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(networkoperatorv1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/networkoperator/v1/001-egressrouter.crd.yaml b/vendor/github.com/openshift/api/networkoperator/v1/001-egressrouter.crd.yaml new file mode 100644 index 000000000..61c7b59cf --- /dev/null +++ b/vendor/github.com/openshift/api/networkoperator/v1/001-egressrouter.crd.yaml @@ -0,0 +1,208 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/851 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + creationTimestamp: null + name: egressrouters.network.operator.openshift.io +spec: + group: network.operator.openshift.io + names: + kind: EgressRouter + listKind: EgressRouterList + plural: egressrouters + singular: egressrouter + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[*].type + name: Condition + type: string + - jsonPath: .status.conditions[*].status + name: Status + type: string + name: v1 + schema: + openAPIV3Schema: + description: "EgressRouter is a feature allowing the user to define an egress router that acts as a bridge between pods and external systems. The egress router runs a service that redirects egress traffic originating from a pod or a group of pods to a remote external system or multiple destinations as per configuration. \n It is consumed by the cluster-network-operator. 
More specifically, given an EgressRouter CR with <name>, the CNO will create and manage: - A service called <name> - An egress pod called <name> - A NAD called <name> \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). \n EgressRouter is a single egressrouter pod configuration object."
+        properties:
+          apiVersion:
+            description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+            type: string
+          kind:
+            description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: Specification of the desired egress router.
+            oneOf:
+            - properties:
+                mode:
+                  enum:
+                  - Redirect
+              required:
+              - redirect
+            properties:
+              addresses:
+                description: List of IP addresses to configure on the pod's secondary interface.
+                items:
+                  description: EgressRouterAddress contains a pair of IP CIDR and gateway to be configured on the router's interface
+                  properties:
+                    gateway:
+                      anyOf:
+                      - format: ipv4
+                      - format: ipv6
+                      description: IP address of the next-hop gateway, if it cannot be automatically determined. Can be IPv4 or IPv6.
+                      type: string
+                    ip:
+                      description: IP is the address to configure on the router's interface. Can be IPv4 or IPv6.
+                      type: string
+                  required:
+                  - ip
+                  type: object
+                type: array
+              mode:
+                default: Redirect
+                description: Mode depicts the mode that is used for the egress router. The default mode is "Redirect" and is the only supported mode currently.
+                enum:
+                - Redirect
+                type: string
+              networkInterface:
+                default:
+                  macvlan:
+                    mode: Bridge
+                description: Specification of interface to create/use. The default is macvlan. Currently only macvlan is supported.
+                oneOf:
+                - required:
+                  - macvlan
+                properties:
+                  macvlan:
+                    default:
+                      mode: Bridge
+                    description: Arguments specific to the interfaceType macvlan
+                    properties:
+                      master:
+                        description: Name of the master interface. Need not be specified if it can be inferred from the IP address.
+                        type: string
+                      mode:
+                        default: Bridge
+                        description: Mode depicts the mode that is used for the macvlan interface; one of Bridge|Private|VEPA|Passthru. The default mode is "Bridge".
+                        enum:
+                        - Bridge
+                        - Private
+                        - VEPA
+                        - Passthru
+                        type: string
+                    required:
+                    - mode
+                    type: object
+                type: object
+              redirect:
+                description: Redirect represents the configuration parameters specific to redirect mode.
+                properties:
+                  fallbackIP:
+                    anyOf:
+                    - format: ipv4
+                    - format: ipv6
+                    description: FallbackIP specifies the remote destination's IP address. Can be IPv4 or IPv6. If no redirect rules are specified, all traffic from the router are redirected to this IP. If redirect rules are specified, then any connections on any other port (undefined in the rules) on the router will be redirected to this IP. If redirect rules are specified and no fallback IP is provided, connections on other ports will simply be rejected.
+                    type: string
+                  redirectRules:
+                    description: List of L4RedirectRules that define the DNAT redirection from the pod to the destination in redirect mode.
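+                    # For illustration only (not part of the generated schema):
+                    # a hypothetical rule matching the items schema below could
+                    # look like (all values are illustrative)
+                    #   redirectRules:
+                    #   - destinationIP: 192.0.2.10
+                    #     port: 80
+                    #     protocol: TCP
+                    #     targetPort: 8080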
+ items: + description: L4RedirectRule defines a DNAT redirection from a given port to a destination IP and port. + properties: + destinationIP: + anyOf: + - format: ipv4 + - format: ipv6 + description: IP specifies the remote destination's IP address. Can be IPv4 or IPv6. + type: string + port: + description: Port is the port number to which clients should send traffic to be redirected. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + protocol: + description: Protocol can be TCP, SCTP or UDP. + enum: + - TCP + - UDP + - SCTP + type: string + targetPort: + description: TargetPort allows specifying the port number on the remote destination to which the traffic gets redirected to. If unspecified, the value from "Port" is used. + format: int32 + maximum: 65535 + minimum: 1 + type: integer + required: + - destinationIP + - port + - protocol + type: object + type: array + type: object + required: + - addresses + - mode + - networkInterface + type: object + status: + description: Observed status of EgressRouter. + properties: + conditions: + description: Observed status of the egress router + items: + description: EgressRouterStatusCondition represents the state of the egress router's managed and monitored components. + properties: + lastTransitionTime: + description: LastTransitionTime is the time of the last update to the current status property. + format: date-time + nullable: true + type: string + message: + description: Message provides additional information about the current condition. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines. + type: string + reason: + description: Reason is the CamelCase reason for the condition's current status. + type: string + status: + description: Status of the condition, one of True, False, Unknown. 
+ enum: + - "True" + - "False" + - Unknown + type: string + type: + description: Type specifies the aspect reported by this condition; one of Available, Progressing, Degraded + enum: + - Available + - Progressing + - Degraded + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + required: + - conditions + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/vendor/github.com/openshift/api/networkoperator/v1/001-egressrouter.crd.yaml-patch b/vendor/github.com/openshift/api/networkoperator/v1/001-egressrouter.crd.yaml-patch new file mode 100644 index 000000000..3f1cc0342 --- /dev/null +++ b/vendor/github.com/openshift/api/networkoperator/v1/001-egressrouter.crd.yaml-patch @@ -0,0 +1,26 @@ +- op: add + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/addresses/items/properties/gateway/anyOf + value: + - format: ipv4 + - format: ipv6 +- op: add + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/redirect/properties/fallbackIP/anyOf + value: + - format: ipv4 + - format: ipv6 +- op: add + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/redirect/properties/redirectRules/items/properties/destinationIP/anyOf + value: + - format: ipv4 + - format: ipv6 +- op: add + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/oneOf + value: + - properties: + mode: + enum: ["Redirect"] + required: ["redirect"] +- op: add + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/networkInterface/oneOf + value: + - required: ["macvlan"] diff --git a/vendor/github.com/openshift/api/networkoperator/v1/Makefile b/vendor/github.com/openshift/api/networkoperator/v1/Makefile new file mode 100644 index 000000000..96c9e1639 --- /dev/null +++ b/vendor/github.com/openshift/api/networkoperator/v1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="network.operator.openshift.io/v1" diff --git a/vendor/github.com/openshift/api/networkoperator/v1/doc.go b/vendor/github.com/openshift/api/networkoperator/v1/doc.go new file mode 100644 index 000000000..3c958bbc6 --- /dev/null +++ b/vendor/github.com/openshift/api/networkoperator/v1/doc.go @@ -0,0 +1,5 @@ +// Package v1 contains API Schema definitions for the network v1 API group +// +k8s:deepcopy-gen=package,register +// +groupName=network.operator.openshift.io +// +kubebuilder:validation:Optional +package v1 diff --git a/vendor/github.com/openshift/api/networkoperator/v1/generated.pb.go b/vendor/github.com/openshift/api/networkoperator/v1/generated.pb.go new file mode 100644 index 000000000..e27523704 --- /dev/null +++ b/vendor/github.com/openshift/api/networkoperator/v1/generated.pb.go @@ -0,0 +1,2552 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/networkoperator/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *EgressRouter) Reset() { *m = EgressRouter{} } +func (*EgressRouter) ProtoMessage() {} +func (*EgressRouter) Descriptor() ([]byte, []int) { + return fileDescriptor_4bddfca96304d190, []int{0} +} +func (m *EgressRouter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EgressRouter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EgressRouter) XXX_Merge(src proto.Message) { + xxx_messageInfo_EgressRouter.Merge(m, src) +} +func (m *EgressRouter) XXX_Size() int { + return m.Size() +} +func (m *EgressRouter) XXX_DiscardUnknown() { + xxx_messageInfo_EgressRouter.DiscardUnknown(m) +} + +var xxx_messageInfo_EgressRouter proto.InternalMessageInfo + +func (m *EgressRouterAddress) Reset() { *m = EgressRouterAddress{} } +func (*EgressRouterAddress) ProtoMessage() {} +func (*EgressRouterAddress) Descriptor() ([]byte, []int) { + return fileDescriptor_4bddfca96304d190, []int{1} +} +func (m *EgressRouterAddress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EgressRouterAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EgressRouterAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_EgressRouterAddress.Merge(m, src) +} +func (m *EgressRouterAddress) XXX_Size() int { + return m.Size() +} +func (m *EgressRouterAddress) XXX_DiscardUnknown() { + xxx_messageInfo_EgressRouterAddress.DiscardUnknown(m) +} + +var xxx_messageInfo_EgressRouterAddress proto.InternalMessageInfo + +func (m *EgressRouterInterface) Reset() { *m = EgressRouterInterface{} } +func (*EgressRouterInterface) ProtoMessage() {} +func (*EgressRouterInterface) Descriptor() ([]byte, []int) { + return fileDescriptor_4bddfca96304d190, []int{2} +} +func (m *EgressRouterInterface) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EgressRouterInterface) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EgressRouterInterface) XXX_Merge(src proto.Message) { + xxx_messageInfo_EgressRouterInterface.Merge(m, src) +} +func (m *EgressRouterInterface) XXX_Size() int { + return m.Size() +} +func (m *EgressRouterInterface) XXX_DiscardUnknown() { + xxx_messageInfo_EgressRouterInterface.DiscardUnknown(m) +} + +var xxx_messageInfo_EgressRouterInterface proto.InternalMessageInfo + +func (m *EgressRouterList) Reset() { *m = EgressRouterList{} } +func (*EgressRouterList) ProtoMessage() {} +func (*EgressRouterList) Descriptor() ([]byte, []int) { + return fileDescriptor_4bddfca96304d190, []int{3} +} +func (m *EgressRouterList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EgressRouterList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EgressRouterList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EgressRouterList.Merge(m, src) +} +func (m *EgressRouterList) XXX_Size() int { + return m.Size() +} +func (m *EgressRouterList) 
XXX_DiscardUnknown() { + xxx_messageInfo_EgressRouterList.DiscardUnknown(m) +} + +var xxx_messageInfo_EgressRouterList proto.InternalMessageInfo + +func (m *EgressRouterSpec) Reset() { *m = EgressRouterSpec{} } +func (*EgressRouterSpec) ProtoMessage() {} +func (*EgressRouterSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_4bddfca96304d190, []int{4} +} +func (m *EgressRouterSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EgressRouterSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EgressRouterSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_EgressRouterSpec.Merge(m, src) +} +func (m *EgressRouterSpec) XXX_Size() int { + return m.Size() +} +func (m *EgressRouterSpec) XXX_DiscardUnknown() { + xxx_messageInfo_EgressRouterSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_EgressRouterSpec proto.InternalMessageInfo + +func (m *EgressRouterStatus) Reset() { *m = EgressRouterStatus{} } +func (*EgressRouterStatus) ProtoMessage() {} +func (*EgressRouterStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_4bddfca96304d190, []int{5} +} +func (m *EgressRouterStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EgressRouterStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EgressRouterStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_EgressRouterStatus.Merge(m, src) +} +func (m *EgressRouterStatus) XXX_Size() int { + return m.Size() +} +func (m *EgressRouterStatus) XXX_DiscardUnknown() { + xxx_messageInfo_EgressRouterStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_EgressRouterStatus proto.InternalMessageInfo + +func (m *EgressRouterStatusCondition) Reset() { *m = EgressRouterStatusCondition{} } +func (*EgressRouterStatusCondition) ProtoMessage() {} +func (*EgressRouterStatusCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_4bddfca96304d190, []int{6} +} +func (m *EgressRouterStatusCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EgressRouterStatusCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EgressRouterStatusCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_EgressRouterStatusCondition.Merge(m, src) +} +func (m *EgressRouterStatusCondition) XXX_Size() int { + return m.Size() +} +func (m *EgressRouterStatusCondition) XXX_DiscardUnknown() { + xxx_messageInfo_EgressRouterStatusCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_EgressRouterStatusCondition proto.InternalMessageInfo + +func (m *L4RedirectRule) Reset() { *m = L4RedirectRule{} } +func (*L4RedirectRule) ProtoMessage() {} +func (*L4RedirectRule) Descriptor() ([]byte, []int) { + return fileDescriptor_4bddfca96304d190, []int{7} +} +func (m *L4RedirectRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *L4RedirectRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *L4RedirectRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_L4RedirectRule.Merge(m, src) +} +func (m *L4RedirectRule) XXX_Size() int { + return 
m.Size() +} +func (m *L4RedirectRule) XXX_DiscardUnknown() { + xxx_messageInfo_L4RedirectRule.DiscardUnknown(m) +} + +var xxx_messageInfo_L4RedirectRule proto.InternalMessageInfo + +func (m *MacvlanConfig) Reset() { *m = MacvlanConfig{} } +func (*MacvlanConfig) ProtoMessage() {} +func (*MacvlanConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_4bddfca96304d190, []int{8} +} +func (m *MacvlanConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MacvlanConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MacvlanConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_MacvlanConfig.Merge(m, src) +} +func (m *MacvlanConfig) XXX_Size() int { + return m.Size() +} +func (m *MacvlanConfig) XXX_DiscardUnknown() { + xxx_messageInfo_MacvlanConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_MacvlanConfig proto.InternalMessageInfo + +func (m *RedirectConfig) Reset() { *m = RedirectConfig{} } +func (*RedirectConfig) ProtoMessage() {} +func (*RedirectConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_4bddfca96304d190, []int{9} +} +func (m *RedirectConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RedirectConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RedirectConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_RedirectConfig.Merge(m, src) +} +func (m *RedirectConfig) XXX_Size() int { + return m.Size() +} +func (m *RedirectConfig) XXX_DiscardUnknown() { + xxx_messageInfo_RedirectConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_RedirectConfig proto.InternalMessageInfo + +func init() { + proto.RegisterType((*EgressRouter)(nil), "github.com.openshift.api.networkoperator.v1.EgressRouter") + proto.RegisterType((*EgressRouterAddress)(nil), "github.com.openshift.api.networkoperator.v1.EgressRouterAddress") + proto.RegisterType((*EgressRouterInterface)(nil), "github.com.openshift.api.networkoperator.v1.EgressRouterInterface") + proto.RegisterType((*EgressRouterList)(nil), "github.com.openshift.api.networkoperator.v1.EgressRouterList") + proto.RegisterType((*EgressRouterSpec)(nil), "github.com.openshift.api.networkoperator.v1.EgressRouterSpec") + proto.RegisterType((*EgressRouterStatus)(nil), "github.com.openshift.api.networkoperator.v1.EgressRouterStatus") + proto.RegisterType((*EgressRouterStatusCondition)(nil), "github.com.openshift.api.networkoperator.v1.EgressRouterStatusCondition") + proto.RegisterType((*L4RedirectRule)(nil), "github.com.openshift.api.networkoperator.v1.L4RedirectRule") + proto.RegisterType((*MacvlanConfig)(nil), "github.com.openshift.api.networkoperator.v1.MacvlanConfig") + proto.RegisterType((*RedirectConfig)(nil), "github.com.openshift.api.networkoperator.v1.RedirectConfig") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/networkoperator/v1/generated.proto", fileDescriptor_4bddfca96304d190) +} + +var fileDescriptor_4bddfca96304d190 = []byte{ + // 960 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xcf, 0x6f, 0x23, 0x35, + 0x14, 0xce, 0xa4, 0x49, 0x9b, 0xba, 0x4d, 0xb7, 0x18, 0xad, 0x14, 0x65, 0xa5, 0xa4, 0xcc, 0x61, + 0xb5, 0xb0, 0x62, 0x86, 0x56, 0x15, 0x62, 0x29, 0x08, 0x76, 0xca, 0xaf, 0x48, 0x0d, 0x04, 0xd3, + 0x13, 0x5a, 0x01, 0xee, 
0x8c, 0x33, 0x19, 0x92, 0x19, 0x0f, 0xb6, 0x93, 0xa5, 0x12, 0x42, 0x1c, + 0x39, 0x22, 0xfe, 0x0f, 0xfe, 0x01, 0x0e, 0x9c, 0x7b, 0xec, 0x71, 0x4f, 0x11, 0x0d, 0x17, 0xfe, + 0x86, 0x9e, 0x90, 0x3d, 0x9e, 0x5f, 0xed, 0xb2, 0x6a, 0xba, 0xb7, 0xf8, 0xf9, 0x7d, 0xdf, 0xf7, + 0xfc, 0xfc, 0xf9, 0x4d, 0xc0, 0x81, 0x1f, 0x88, 0xd1, 0xf4, 0xc4, 0x72, 0x69, 0x68, 0xd3, 0x98, + 0x44, 0x7c, 0x14, 0x0c, 0x85, 0x8d, 0xe3, 0xc0, 0x8e, 0x88, 0x78, 0x4a, 0xd9, 0x98, 0xc6, 0x84, + 0x61, 0x41, 0x99, 0x3d, 0xdb, 0xb5, 0x7d, 0x12, 0xc9, 0x05, 0xf1, 0xac, 0x98, 0x51, 0x41, 0xe1, + 0xc3, 0x1c, 0x6c, 0x65, 0x60, 0x0b, 0xc7, 0x81, 0x75, 0x05, 0x6c, 0xcd, 0x76, 0xdb, 0x6f, 0x16, + 0x94, 0x7c, 0xea, 0x53, 0x5b, 0x71, 0x9c, 0x4c, 0x87, 0x6a, 0xa5, 0x16, 0xea, 0x57, 0xc2, 0xdd, + 0xde, 0x1f, 0xbf, 0xc3, 0xad, 0x80, 0xca, 0x52, 0x42, 0xec, 0x8e, 0x82, 0x88, 0xb0, 0x53, 0x3b, + 0x1e, 0xfb, 0x32, 0xc0, 0xed, 0x90, 0x08, 0xfc, 0x9c, 0x8a, 0xda, 0x6f, 0xff, 0x1f, 0x8a, 0x4d, + 0x23, 0x11, 0x84, 0xc4, 0xe6, 0xee, 0x88, 0x84, 0xf8, 0x2a, 0xce, 0xfc, 0xb3, 0x0a, 0x36, 0x3f, + 0xf6, 0x19, 0xe1, 0x1c, 0xd1, 0xa9, 0x20, 0x0c, 0x7e, 0x07, 0x1a, 0x52, 0xc3, 0xc3, 0x02, 0xb7, + 0x8c, 0x1d, 0xe3, 0xc1, 0xc6, 0xde, 0x5b, 0x56, 0xc2, 0x6d, 0x15, 0xb9, 0xad, 0x78, 0xec, 0xcb, + 0x00, 0xb7, 0x64, 0xb6, 0x35, 0xdb, 0xb5, 0xbe, 0x38, 0xf9, 0x9e, 0xb8, 0xa2, 0x4f, 0x04, 0x76, + 0xe0, 0xd9, 0xbc, 0x5b, 0x59, 0xcc, 0xbb, 0x20, 0x8f, 0xa1, 0x8c, 0x15, 0x7e, 0x0b, 0x6a, 0x3c, + 0x26, 0x6e, 0xab, 0xaa, 0xd8, 0xdf, 0xb7, 0x96, 0xe8, 0xa5, 0x55, 0x2c, 0xf5, 0xab, 0x98, 0xb8, + 0xce, 0xa6, 0x96, 0xaa, 0xc9, 0x15, 0x52, 0xc4, 0xd0, 0x07, 0xab, 0x5c, 0x60, 0x31, 0xe5, 0xad, + 0x15, 0x25, 0xf1, 0xc1, 0xed, 0x25, 0x14, 0x8d, 0xb3, 0xa5, 0x45, 0x56, 0x93, 0x35, 0xd2, 0xf4, + 0xe6, 0x13, 0xf0, 0x6a, 0x31, 0xfb, 0xb1, 0xe7, 0xc9, 0x05, 0x6c, 0x83, 0x6a, 0x10, 0xab, 0xe6, + 0xad, 0x3b, 0x40, 0x43, 0xab, 0xbd, 0x01, 0xaa, 0x06, 0x31, 0x7c, 0x1d, 0xac, 0xf9, 0x58, 0x90, + 0xa7, 0xf8, 0x54, 0x9d, 0x7f, 0xdd, 0xb9, 0xa3, 0x13, 0xd6, 0x3e, 0x4d, 0xc2, 0x28, 0xdd, 0x37, + 0x7f, 0x06, 0x77, 0x8b, 0xec, 0xbd, 0x48, 0x10, 0x36, 0xc4, 0x2e, 0x81, 0x04, 0xac, 0x85, 0xd8, + 0x9d, 0x4d, 0x70, 0xa4, 0x6f, 0xe8, 0xdd, 0xa5, 0x0e, 0xd8, 0x4f, 0xb0, 0x87, 0x34, 0x1a, 0x06, + 0x7e, 0xae, 0xaf, 0xc3, 0x28, 0xe5, 0x36, 0xcf, 0x0d, 0xb0, 0x5d, 0x2c, 0xe0, 0x28, 0xe0, 0x02, + 0x3e, 0xb9, 0x66, 0x0f, 0xeb, 0x66, 0xf6, 0x90, 0x68, 0x65, 0x8e, 0x6d, 0x2d, 0xd8, 0x48, 0x23, + 0x05, 0x6b, 0x7c, 0x03, 0xea, 0x81, 0x20, 0x21, 0x6f, 0x55, 0x77, 0x56, 0x1e, 0x6c, 0xec, 0x3d, + 0xba, 0xf5, 0xc5, 0x39, 0x4d, 0xad, 0x52, 0xef, 0x49, 0x3e, 0x94, 0xd0, 0x9a, 0x7f, 0xac, 0x94, + 0x8f, 0x24, 0x4d, 0x03, 0xf7, 0x41, 0x2d, 0xa4, 0x1e, 0xd1, 0x17, 0xb6, 0x93, 0x1a, 0xaa, 0x4f, + 0x3d, 0x72, 0x39, 0xef, 0x96, 0xf2, 0x65, 0x0c, 0xa9, 0x6c, 0x48, 0x40, 0x83, 0x11, 0x2f, 0x60, + 0xc4, 0x15, 0xda, 0xc9, 0x07, 0x4b, 0x55, 0x8b, 0x34, 0x58, 0x5f, 0xc3, 0xa6, 0xec, 0x48, 0x1a, + 0x43, 0x19, 0x35, 0xfc, 0xd5, 0x00, 0xdb, 0x1a, 0x9d, 0x19, 0x40, 0xdb, 0xda, 0xb9, 0x75, 0x77, + 0x32, 0x26, 0xa7, 0xa5, 0x4f, 0xbb, 0xfd, 0xf9, 0x15, 0x0d, 0x74, 0x4d, 0x15, 0xfe, 0x00, 0xd6, + 0x71, 0xe2, 0x70, 0xc2, 0x5b, 0x35, 0x75, 0x41, 0x1f, 0xde, 0xba, 0x04, 0xfd, 0x56, 0x9c, 0x57, + 0x74, 0x01, 0xeb, 0x8f, 0x53, 0x6a, 0x94, 0xab, 0x98, 0xbf, 0x1b, 0x00, 0x5e, 0x7f, 0x8f, 0xf0, + 0x27, 0x00, 0x5c, 0x1a, 0x79, 0x81, 0x08, 0x68, 0xc4, 0x5b, 0x86, 0x2a, 0xe5, 0xb3, 0x97, 0x7c, + 0xe4, 0x87, 0x29, 0x61, 0x3e, 0xbd, 0xb2, 0x10, 0x47, 0x05, 0x3d, 0xf3, 0xdf, 0x2a, 0xb8, 0xf7, + 0x02, 0x3c, 0x3c, 0x04, 0x35, 0x71, 0x1a, 0xa7, 
0x7e, 0xb2, 0x53, 0x3f, 0x1d, 0x9f, 0xc6, 0xd2, + 0x4f, 0xdd, 0x17, 0x40, 0x65, 0x0a, 0x52, 0x60, 0xf8, 0x28, 0x9b, 0x61, 0xc9, 0x98, 0x78, 0xad, + 0x3c, 0x82, 0x2e, 0xe7, 0xdd, 0x3b, 0x19, 0xac, 0x3c, 0x95, 0xe0, 0x0c, 0xc0, 0x09, 0xe6, 0xe2, + 0x98, 0xe1, 0x88, 0x27, 0xb4, 0x41, 0x98, 0x7a, 0xe6, 0x8d, 0x9b, 0x3d, 0x56, 0x89, 0x70, 0xda, + 0x5a, 0x12, 0x1e, 0x5d, 0x63, 0x43, 0xcf, 0x51, 0x80, 0xf7, 0xc1, 0x2a, 0x23, 0x98, 0xd3, 0xa8, + 0x55, 0x53, 0x25, 0x67, 0x53, 0x13, 0xa9, 0x28, 0xd2, 0xbb, 0x72, 0x04, 0x86, 0x84, 0x73, 0xec, + 0x93, 0x56, 0xbd, 0x3c, 0x02, 0xfb, 0x49, 0x18, 0xa5, 0xfb, 0xe6, 0x85, 0x01, 0xb6, 0x8e, 0xf6, + 0xb3, 0x67, 0x31, 0x9d, 0x10, 0x78, 0x00, 0x9a, 0x1e, 0xe1, 0x22, 0x88, 0xb0, 0x14, 0xee, 0x0d, + 0x74, 0x9b, 0xef, 0x6a, 0x8e, 0xe6, 0x47, 0xc5, 0x4d, 0x54, 0xce, 0x85, 0x3b, 0xa0, 0x16, 0x53, + 0x96, 0x3c, 0xd8, 0x7a, 0xfe, 0xed, 0x18, 0x50, 0x26, 0x90, 0xda, 0x81, 0xef, 0x81, 0x86, 0xfa, + 0x30, 0xba, 0x74, 0xa2, 0x5a, 0x96, 0x0f, 0x84, 0xc6, 0x40, 0xc7, 0x2f, 0xe7, 0xdd, 0xcd, 0xf4, + 0xb7, 0xba, 0xb1, 0x0c, 0x01, 0xf7, 0x00, 0x10, 0x98, 0xf9, 0x44, 0x48, 0x46, 0xd5, 0x86, 0x7a, + 0x6e, 0xa7, 0xe3, 0x6c, 0x07, 0x15, 0xb2, 0xcc, 0x11, 0x68, 0x96, 0x26, 0x32, 0xb4, 0x4b, 0xf3, + 0xe8, 0xde, 0x95, 0x79, 0xb4, 0xa1, 0x93, 0x0b, 0xa3, 0xe8, 0x3e, 0x58, 0x0d, 0x31, 0x17, 0x84, + 0x69, 0xaf, 0x64, 0x8d, 0xef, 0xab, 0x28, 0xd2, 0xbb, 0xe6, 0x5f, 0x06, 0xd8, 0x2a, 0x8f, 0x1d, + 0xf8, 0x23, 0x68, 0xb2, 0x42, 0x77, 0xd3, 0xc7, 0xb4, 0xdc, 0x28, 0x2b, 0xdf, 0x50, 0x7e, 0x15, + 0xc5, 0x28, 0x47, 0x65, 0x21, 0xd9, 0xaa, 0x21, 0x9e, 0x4c, 0x4e, 0xb0, 0x3b, 0xee, 0x0d, 0x74, + 0xe1, 0x59, 0xab, 0x3e, 0xc9, 0x76, 0x50, 0x21, 0xcb, 0xf9, 0xf2, 0xec, 0xa2, 0x53, 0x39, 0xbf, + 0xe8, 0x54, 0x9e, 0x5d, 0x74, 0x2a, 0xbf, 0x2c, 0x3a, 0xc6, 0xd9, 0xa2, 0x63, 0x9c, 0x2f, 0x3a, + 0xc6, 0xb3, 0x45, 0xc7, 0xf8, 0x7b, 0xd1, 0x31, 0x7e, 0xfb, 0xa7, 0x53, 0xf9, 0xfa, 0xe1, 0x12, + 0xff, 0xec, 0xfe, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x7e, 0x47, 0x9e, 0x13, 0x07, 0x0a, 0x00, 0x00, +} + +func (m *EgressRouter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EgressRouter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EgressRouter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EgressRouterAddress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EgressRouterAddress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EgressRouterAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i 
+ var l int + _ = l + i -= len(m.Gateway) + copy(dAtA[i:], m.Gateway) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Gateway))) + i-- + dAtA[i] = 0x12 + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EgressRouterInterface) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EgressRouterInterface) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EgressRouterInterface) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Macvlan.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EgressRouterList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EgressRouterList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EgressRouterList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EgressRouterSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EgressRouterSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EgressRouterSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Addresses) > 0 { + for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.NetworkInterface.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Redirect != nil { + { + size, err := m.Redirect.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Mode) + copy(dAtA[i:], m.Mode) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Mode))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EgressRouterStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, 
err + } + return dAtA[:n], nil +} + +func (m *EgressRouterStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EgressRouterStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *EgressRouterStatusCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EgressRouterStatusCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EgressRouterStatusCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *L4RedirectRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *L4RedirectRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *L4RedirectRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetPort)) + i-- + dAtA[i] = 0x20 + i -= len(m.Protocol) + copy(dAtA[i:], m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i-- + dAtA[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x10 + i -= len(m.DestinationIP) + copy(dAtA[i:], m.DestinationIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DestinationIP))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MacvlanConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MacvlanConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MacvlanConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Master) + copy(dAtA[i:], m.Master) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Master))) + i-- + dAtA[i] = 0x12 + i -= len(m.Mode) + copy(dAtA[i:], m.Mode) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Mode))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RedirectConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RedirectConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RedirectConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.FallbackIP) + copy(dAtA[i:], m.FallbackIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FallbackIP))) + i-- + dAtA[i] = 0x12 + if len(m.RedirectRules) > 0 { + for iNdEx := len(m.RedirectRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RedirectRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *EgressRouter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EgressRouterAddress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Gateway) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EgressRouterInterface) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Macvlan.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EgressRouterList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EgressRouterSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Mode) + n += 1 + l + sovGenerated(uint64(l)) + if m.Redirect != nil { + l = m.Redirect.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.NetworkInterface.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Addresses) > 0 { + for _, e := range m.Addresses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EgressRouterStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EgressRouterStatusCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *L4RedirectRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DestinationIP) + n += 1 + l + sovGenerated(uint64(l)) + 
n += 1 + sovGenerated(uint64(m.Port)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.TargetPort)) + return n +} + +func (m *MacvlanConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Mode) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Master) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RedirectConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RedirectRules) > 0 { + for _, e := range m.RedirectRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.FallbackIP) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *EgressRouter) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EgressRouter{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "EgressRouterSpec", "EgressRouterSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "EgressRouterStatus", "EgressRouterStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *EgressRouterAddress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EgressRouterAddress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, + `}`, + }, "") + return s +} +func (this *EgressRouterInterface) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EgressRouterInterface{`, + `Macvlan:` + strings.Replace(strings.Replace(this.Macvlan.String(), "MacvlanConfig", "MacvlanConfig", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *EgressRouterList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]EgressRouter{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "EgressRouter", "EgressRouter", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&EgressRouterList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *EgressRouterSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForAddresses := "[]EgressRouterAddress{" + for _, f := range this.Addresses { + repeatedStringForAddresses += strings.Replace(strings.Replace(f.String(), "EgressRouterAddress", "EgressRouterAddress", 1), `&`, ``, 1) + "," + } + repeatedStringForAddresses += "}" + s := strings.Join([]string{`&EgressRouterSpec{`, + `Mode:` + fmt.Sprintf("%v", this.Mode) + `,`, + `Redirect:` + strings.Replace(this.Redirect.String(), "RedirectConfig", "RedirectConfig", 1) + `,`, + `NetworkInterface:` + strings.Replace(strings.Replace(this.NetworkInterface.String(), "EgressRouterInterface", "EgressRouterInterface", 1), `&`, ``, 1) + `,`, + `Addresses:` + repeatedStringForAddresses + `,`, + `}`, + }, "") + return s +} +func (this *EgressRouterStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := 
"[]EgressRouterStatusCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "EgressRouterStatusCondition", "EgressRouterStatusCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&EgressRouterStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *EgressRouterStatusCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EgressRouterStatusCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *L4RedirectRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&L4RedirectRule{`, + `DestinationIP:` + fmt.Sprintf("%v", this.DestinationIP) + `,`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `TargetPort:` + fmt.Sprintf("%v", this.TargetPort) + `,`, + `}`, + }, "") + return s +} +func (this *MacvlanConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MacvlanConfig{`, + `Mode:` + fmt.Sprintf("%v", this.Mode) + `,`, + `Master:` + fmt.Sprintf("%v", this.Master) + `,`, + `}`, + }, "") + return s +} +func (this *RedirectConfig) String() string { + if this == nil { + return "nil" + } + repeatedStringForRedirectRules := "[]L4RedirectRule{" + for _, f := range this.RedirectRules { + repeatedStringForRedirectRules += strings.Replace(strings.Replace(f.String(), "L4RedirectRule", "L4RedirectRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRedirectRules += "}" + s := strings.Join([]string{`&RedirectConfig{`, + `RedirectRules:` + repeatedStringForRedirectRules + `,`, + `FallbackIP:` + fmt.Sprintf("%v", this.FallbackIP) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *EgressRouter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EgressRouter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EgressRouter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EgressRouterAddress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EgressRouterAddress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EgressRouterAddress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gateway", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Gateway = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EgressRouterInterface) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EgressRouterInterface: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EgressRouterInterface: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Macvlan", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Macvlan.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EgressRouterList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EgressRouterList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EgressRouterList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, EgressRouter{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EgressRouterSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EgressRouterSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EgressRouterSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mode = EgressRouterMode(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Redirect", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Redirect == nil { + m.Redirect = &RedirectConfig{} + } + if err := m.Redirect.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field NetworkInterface", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.NetworkInterface.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, EgressRouterAddress{}) + if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EgressRouterStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EgressRouterStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EgressRouterStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, EgressRouterStatusCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *EgressRouterStatusCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EgressRouterStatusCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EgressRouterStatusCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = EgressRouterStatusConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = 
postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *L4RedirectRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: L4RedirectRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: L4RedirectRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DestinationIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DestinationIP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = ProtocolType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field TargetPort", wireType) + } + m.TargetPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TargetPort |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MacvlanConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MacvlanConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MacvlanConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mode = MacvlanMode(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Master", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Master = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RedirectConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if 
wireType == 4 { + return fmt.Errorf("proto: RedirectConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RedirectConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RedirectRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RedirectRules = append(m.RedirectRules, L4RedirectRule{}) + if err := m.RedirectRules[len(m.RedirectRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FallbackIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FallbackIP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + 
ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
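Aside: the generated MarshalToSizedBuffer functions above fill the buffer back to front, writing each varint immediately before the current offset. That reverse-fill pattern is easier to see in isolation; a minimal standalone sketch (not part of the patch, assuming only the standard library):

package main

import "fmt"

// encodeVarint mirrors the generated encodeVarintGenerated: it writes v as a
// protobuf varint ending just before offset and returns the new start index.
func encodeVarint(buf []byte, offset int, v uint64) int {
	// Count the 7-bit groups needed for v (always at least one), like sovGenerated.
	n := 1
	for x := v; x >= 1<<7; x >>= 7 {
		n++
	}
	offset -= n
	base := offset
	// Emit low-order 7-bit groups first, setting the continuation bit on all but the last.
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return base
}

func main() {
	buf := make([]byte, 8)
	i := len(buf)
	i = encodeVarint(buf, i, 300) // 300 encodes as 0xAC 0x02
	fmt.Printf("% x\n", buf[i:])  // prints: ac 02
}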
diff --git a/vendor/github.com/openshift/api/networkoperator/v1/generated.proto b/vendor/github.com/openshift/api/networkoperator/v1/generated.proto
new file mode 100644
index 000000000..540637af4
--- /dev/null
+++ b/vendor/github.com/openshift/api/networkoperator/v1/generated.proto
@@ -0,0 +1,189 @@
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package github.com.openshift.api.networkoperator.v1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "github.com/openshift/api/networkoperator/v1";
+
+// EgressRouter is a feature allowing the user to define an egress router
+// that acts as a bridge between pods and external systems. The egress router runs
+// a service that redirects egress traffic originating from a pod or a group of
+// pods to a remote external system or multiple destinations as per configuration.
+//
+// It is consumed by the cluster-network-operator.
+// More specifically, given an EgressRouter CR with <name>, the CNO will create and manage:
+// - A service called <name>
+// - An egress pod called <name>
+// - A NAD called <name>
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+//
+// EgressRouter is a single egressrouter pod configuration object.
+// +k8s:openapi-gen=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:path=egressrouters,scope=Namespaced
+// +kubebuilder:printcolumn:name="Condition",type=string,JSONPath=".status.conditions[*].type"
+// +kubebuilder:printcolumn:name="Status",type=string,JSONPath=".status.conditions[*].status"
+// +openshift:compatibility-gen:level=1
+message EgressRouter {
+  // metadata is the standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Specification of the desired egress router.
+  // +kubebuilder:validation:Required
+  optional EgressRouterSpec spec = 2;
+
+  // Observed status of EgressRouter.
+  optional EgressRouterStatus status = 3;
+}
+
+// EgressRouterAddress contains a pair of IP CIDR and gateway to be configured on the router's interface
+// +kubebuilder:validation:Required
+message EgressRouterAddress {
+  // IP is the address to configure on the router's interface. Can be IPv4 or IPv6.
+  // +kubebuilder:validation:Required
+  optional string ip = 1;
+
+  // IP address of the next-hop gateway, if it cannot be automatically determined. Can be IPv4 or IPv6.
+  optional string gateway = 2;
+}
+
+// EgressRouterInterface contains the configuration of interface to create/use.
+message EgressRouterInterface {
+  // Arguments specific to the interfaceType macvlan
+  // +kubebuilder:default:={mode: Bridge}
+  optional MacvlanConfig macvlan = 1;
+}
+
+// EgressRouterList is the list of egress router pods requested.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message EgressRouterList {
+  // metadata is the standard list's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  repeated EgressRouter items = 2;
+}
+
+// EgressRouterSpec contains the configuration for an egress router.
+// Mode, networkInterface and addresses fields must be specified along with exactly one "Config" that matches the mode.
+// Each config consists of parameters specific to that mode.
+// +k8s:openapi-gen=true
+// +kubebuilder:validation:Required
+message EgressRouterSpec {
+  // Mode depicts the mode that is used for the egress router. The default mode is "Redirect" and is the only supported mode currently.
+  // +kubebuilder:validation:Required
+  // +kubebuilder:validation:Enum="Redirect"
+  // +kubebuilder:default:="Redirect"
+  optional string mode = 1;
+
+  // Redirect represents the configuration parameters specific to redirect mode.
+  optional RedirectConfig redirect = 2;
+
+  // Specification of interface to create/use. The default is macvlan.
+  // Currently only macvlan is supported.
+  // +kubebuilder:validation:Required
+  // +kubebuilder:default:={macvlan: {mode: Bridge}}
+  optional EgressRouterInterface networkInterface = 3;
+
+  // List of IP addresses to configure on the pod's secondary interface.
+  // +kubebuilder:validation:Required
+  repeated EgressRouterAddress addresses = 4;
+}
+
+// EgressRouterStatus contains the observed status of EgressRouter. Read-only.
+message EgressRouterStatus {
+  // Observed status of the egress router
+  // +kubebuilder:validation:Required
+  repeated EgressRouterStatusCondition conditions = 1;
+}
+
+// EgressRouterStatusCondition represents the state of the egress router's
+// managed and monitored components.
+// +k8s:deepcopy-gen=true
+message EgressRouterStatusCondition {
+  // Type specifies the aspect reported by this condition; one of Available, Progressing, Degraded
+  // +kubebuilder:validation:Required
+  // +kubebuilder:validation:Enum="Available";"Progressing";"Degraded"
+  // +required
+  optional string type = 1;
+
+  // Status of the condition, one of True, False, Unknown.
+  // +kubebuilder:validation:Required
+  // +kubebuilder:validation:Enum="True";"False";"Unknown"
+  // +required
+  optional string status = 2;
+
+  // LastTransitionTime is the time of the last update to the current status property.
+  // +kubebuilder:validation:Required
+  // +required
+  // +nullable
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+  // Reason is the CamelCase reason for the condition's current status.
+  optional string reason = 4;
+
+  // Message provides additional information about the current condition.
+  // This is only to be consumed by humans. It may contain Line Feed
+  // characters (U+000A), which should be rendered as new lines.
+  optional string message = 5;
+}
+
+// L4RedirectRule defines a DNAT redirection from a given port to a destination IP and port.
+message L4RedirectRule {
+  // IP specifies the remote destination's IP address. Can be IPv4 or IPv6.
+  // +kubebuilder:validation:Required
+  optional string destinationIP = 1;
+
+  // Port is the port number to which clients should send traffic to be redirected.
+  // +kubebuilder:validation:Required
+  // +kubebuilder:validation:Maximum:=65535
+  // +kubebuilder:validation:Minimum:=1
+  optional int32 port = 2;
+
+  // Protocol can be TCP, SCTP or UDP.
+  // +kubebuilder:validation:Required
+  // +kubebuilder:validation:Enum="TCP";"UDP";"SCTP"
+  optional string protocol = 3;
+
+  // TargetPort allows specifying the port number on the remote destination to which the traffic gets redirected.
+  // If unspecified, the value from "Port" is used.
+  // +kubebuilder:validation:Maximum:=65535
+  // +kubebuilder:validation:Minimum:=1
+  optional int32 targetPort = 4;
+}
+
+// MacvlanConfig consists of arguments specific to the macvlan EgressRouterInterfaceType
+message MacvlanConfig {
+  // Mode depicts the mode that is used for the macvlan interface; one of Bridge|Private|VEPA|Passthru. The default mode is "Bridge".
+  // +kubebuilder:validation:Required
+  // +kubebuilder:validation:Enum="Bridge";"Private";"VEPA";"Passthru"
+  // +kubebuilder:default:="Bridge"
+  optional string mode = 1;
+
+  // Name of the master interface. Need not be specified if it can be inferred from the IP address.
+  optional string master = 2;
+}
+
+// RedirectConfig represents the configuration parameters specific to redirect mode.
+message RedirectConfig {
+  // List of L4RedirectRules that define the DNAT redirection from the pod to the destination in redirect mode.
+  repeated L4RedirectRule redirectRules = 1;
+
+  // FallbackIP specifies the remote destination's IP address. Can be IPv4 or IPv6.
+  // If no redirect rules are specified, all traffic from the router is redirected to this IP.
+  // If redirect rules are specified, then any connections on any other port (undefined in the rules) on the router will be redirected to this IP.
+  // If redirect rules are specified and no fallback IP is provided, connections on other ports will simply be rejected.
+  optional string fallbackIP = 2;
+}
+
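Aside: the API shape defined above is easiest to read from the consumer side. A minimal sketch (not part of the patch) of a redirect-mode EgressRouter built against these vendored types; the name, namespace, and addresses are illustrative placeholders, and the import alias netopv1 is an assumption:

package main

import (
	"fmt"

	netopv1 "github.com/openshift/api/networkoperator/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	router := netopv1.EgressRouter{
		ObjectMeta: metav1.ObjectMeta{Name: "egress-router-sample", Namespace: "default"},
		Spec: netopv1.EgressRouterSpec{
			Mode: netopv1.EgressRouterModeRedirect,
			Redirect: &netopv1.RedirectConfig{
				// One DNAT rule; connections to port 8080 on the router are
				// redirected to the remote destination below.
				RedirectRules: []netopv1.L4RedirectRule{{
					DestinationIP: "192.168.12.99", // hypothetical remote destination
					Port:          8080,
					Protocol:      netopv1.ProtocolTypeTCP,
				}},
			},
			NetworkInterface: netopv1.EgressRouterInterface{
				Macvlan: netopv1.MacvlanConfig{Mode: netopv1.MacvlanModeBridge},
			},
			// IP CIDR to configure on the pod's secondary interface.
			Addresses: []netopv1.EgressRouterAddress{{IP: "192.168.12.5/24"}},
		},
	}
	fmt.Println(router.Spec.Mode) // Redirect
}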
diff --git a/vendor/github.com/openshift/api/networkoperator/v1/register.go b/vendor/github.com/openshift/api/networkoperator/v1/register.go
new file mode 100644
index 000000000..2fcb8dc0f
--- /dev/null
+++ b/vendor/github.com/openshift/api/networkoperator/v1/register.go
@@ -0,0 +1,25 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+	GroupName     = "network.operator.openshift.io"
+	GroupVersion  = schema.GroupVersion{Group: GroupName, Version: "v1"}
+	schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	// Install is a function which adds this version to a scheme
+	Install = schemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(GroupVersion,
+		&EgressRouter{},
+		&EgressRouterList{},
+	)
+	metav1.AddToGroupVersion(scheme, GroupVersion)
+	return nil
+}
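Aside: register.go follows the usual scheme-builder convention, so consumers only call Install. A minimal sketch (not part of the patch) of registering the group/version on a fresh scheme; everything except Install and the types is ordinary apimachinery plumbing:

package main

import (
	"fmt"

	netopv1 "github.com/openshift/api/networkoperator/v1"
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// Install adds EgressRouter and EgressRouterList under
	// network.operator.openshift.io/v1.
	utilruntime.Must(netopv1.Install(scheme))

	// The scheme can now resolve the GVK for these objects.
	gvks, _, _ := scheme.ObjectKinds(&netopv1.EgressRouter{})
	for _, gvk := range gvks {
		fmt.Println(gvk.String()) // network.operator.openshift.io/v1, Kind=EgressRouter
	}
}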
diff --git a/vendor/github.com/openshift/api/networkoperator/v1/stable.egressrouter.testsuite.yaml b/vendor/github.com/openshift/api/networkoperator/v1/stable.egressrouter.testsuite.yaml
new file mode 100644
index 000000000..2d4a476d3
--- /dev/null
+++ b/vendor/github.com/openshift/api/networkoperator/v1/stable.egressrouter.testsuite.yaml
@@ -0,0 +1,23 @@
+apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
+name: "[Stable] EgressRouter"
+crd: 001-egressrouter.crd.yaml
+tests:
+  onCreate:
+    - name: Should be able to create a minimal EgressRouter
+      initial: |
+        apiVersion: network.operator.openshift.io/v1
+        kind: EgressRouter
+        spec:
+          mode: Redirect
+          redirect: {}
+          addresses: []
+      expected: |
+        apiVersion: network.operator.openshift.io/v1
+        kind: EgressRouter
+        spec:
+          mode: Redirect
+          redirect: {}
+          addresses: []
+          networkInterface:
+            macvlan:
+              mode: Bridge
diff --git a/vendor/github.com/openshift/api/networkoperator/v1/types_egressrouter.go b/vendor/github.com/openshift/api/networkoperator/v1/types_egressrouter.go
new file mode 100644
index 000000000..4acebb177
--- /dev/null
+++ b/vendor/github.com/openshift/api/networkoperator/v1/types_egressrouter.go
@@ -0,0 +1,265 @@
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// EgressRouter is a feature allowing the user to define an egress router
+// that acts as a bridge between pods and external systems. The egress router runs
+// a service that redirects egress traffic originating from a pod or a group of
+// pods to a remote external system or multiple destinations as per configuration.
+//
+// It is consumed by the cluster-network-operator.
+// More specifically, given an EgressRouter CR with <name>, the CNO will create and manage:
+// - A service called <name>
+// - An egress pod called <name>
+// - A NAD called <name>
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+//
+// EgressRouter is a single egressrouter pod configuration object.
+// +k8s:openapi-gen=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:path=egressrouters,scope=Namespaced
+// +kubebuilder:printcolumn:name="Condition",type=string,JSONPath=".status.conditions[*].type"
+// +kubebuilder:printcolumn:name="Status",type=string,JSONPath=".status.conditions[*].status"
+// +openshift:compatibility-gen:level=1
+type EgressRouter struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Specification of the desired egress router.
+	// +kubebuilder:validation:Required
+	Spec EgressRouterSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+
+	// Observed status of EgressRouter.
+	Status EgressRouterStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// EgressRouterSpec contains the configuration for an egress router.
+// Mode, networkInterface and addresses fields must be specified along with exactly one "Config" that matches the mode.
+// Each config consists of parameters specific to that mode.
+// +k8s:openapi-gen=true
+// +kubebuilder:validation:Required
+type EgressRouterSpec struct {
+	// Mode depicts the mode that is used for the egress router. The default mode is "Redirect" and is the only supported mode currently.
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:Enum="Redirect"
+	// +kubebuilder:default:="Redirect"
+	Mode EgressRouterMode `json:"mode" protobuf:"bytes,1,opt,name=mode,casttype=EgressRouterMode"`
+
+	// Redirect represents the configuration parameters specific to redirect mode.
+	Redirect *RedirectConfig `json:"redirect,omitempty" protobuf:"bytes,2,opt,name=redirect"`
+
+	// Specification of interface to create/use. The default is macvlan.
+	// Currently only macvlan is supported.
+	// +kubebuilder:validation:Required
+	// +kubebuilder:default:={macvlan: {mode: Bridge}}
+	NetworkInterface EgressRouterInterface `json:"networkInterface" protobuf:"bytes,3,opt,name=networkInterface"`
+
+	// List of IP addresses to configure on the pod's secondary interface.
+	// +kubebuilder:validation:Required
+	Addresses []EgressRouterAddress `json:"addresses" protobuf:"bytes,4,rep,name=addresses"`
+}
+
+// EgressRouterMode defines the different types of modes that are supported for the egress router interface.
+// The default mode is "Redirect" and is the only supported mode currently.
+type EgressRouterMode string
+
+const (
+	// EgressRouterModeRedirect creates an egress router that sets up iptables rules to redirect traffic
+	// from its own IP address to one or more remote destination IP addresses.
+	EgressRouterModeRedirect EgressRouterMode = "Redirect"
+)
+
+// RedirectConfig represents the configuration parameters specific to redirect mode.
+type RedirectConfig struct {
+	// List of L4RedirectRules that define the DNAT redirection from the pod to the destination in redirect mode.
+	RedirectRules []L4RedirectRule `json:"redirectRules,omitempty" protobuf:"bytes,1,rep,name=redirectRules"`
+
+	// FallbackIP specifies the remote destination's IP address. Can be IPv4 or IPv6.
+	// If no redirect rules are specified, all traffic from the router is redirected to this IP.
+	// If redirect rules are specified, then any connections on any other port (undefined in the rules) on the router will be redirected to this IP.
+	// If redirect rules are specified and no fallback IP is provided, connections on other ports will simply be rejected.
+	FallbackIP string `json:"fallbackIP,omitempty" protobuf:"bytes,2,opt,name=fallbackIP"`
+}
+
+// L4RedirectRule defines a DNAT redirection from a given port to a destination IP and port.
+type L4RedirectRule struct {
+	// IP specifies the remote destination's IP address. Can be IPv4 or IPv6.
+	// +kubebuilder:validation:Required
+	DestinationIP string `json:"destinationIP" protobuf:"bytes,1,opt,name=destinationIP"`
+
+	// Port is the port number to which clients should send traffic to be redirected.
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:Maximum:=65535
+	// +kubebuilder:validation:Minimum:=1
+	Port int32 `json:"port" protobuf:"varint,2,opt,name=port"`
+
+	// Protocol can be TCP, SCTP or UDP.
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:Enum="TCP";"UDP";"SCTP"
+	Protocol ProtocolType `json:"protocol" protobuf:"bytes,3,opt,name=protocol,casttype=ProtocolType"`
+
+	// TargetPort allows specifying the port number on the remote destination to which the traffic gets redirected.
+	// If unspecified, the value from "Port" is used.
+	// +kubebuilder:validation:Maximum:=65535
+	// +kubebuilder:validation:Minimum:=1
+	TargetPort int32 `json:"targetPort,omitempty" protobuf:"varint,4,opt,name=targetPort"`
+}
+
+// ProtocolType defines the protocol types that are supported
+type ProtocolType string
+
+const (
+	// ProtocolTypeTCP refers to the TCP protocol
+	ProtocolTypeTCP ProtocolType = "TCP"
+
+	// ProtocolTypeUDP refers to the UDP protocol
+	ProtocolTypeUDP ProtocolType = "UDP"
+
+	// ProtocolTypeSCTP refers to the SCTP protocol
+	ProtocolTypeSCTP ProtocolType = "SCTP"
+)
+
+// EgressRouterInterface contains the configuration of interface to create/use.
+type EgressRouterInterface struct {
+	// Arguments specific to the interfaceType macvlan
+	// +kubebuilder:default:={mode: Bridge}
+	Macvlan MacvlanConfig `json:"macvlan" protobuf:"bytes,1,opt,name=macvlan"`
+}
+
+// MacvlanMode defines the different types of modes that are supported for the macvlan interface.
+// source: https://man7.org/linux/man-pages/man8/ip-link.8.html
+type MacvlanMode string
+
+const (
+	// MacvlanModeBridge connects all endpoints directly to each other, communication is not redirected through the physical interface's peer.
+	MacvlanModeBridge MacvlanMode = "Bridge"
+
+	// MacvlanModePrivate does not allow communication between macvlan instances on the same physical interface,
+	// even if the external switch supports hairpin mode.
+	MacvlanModePrivate MacvlanMode = "Private"
+
+	// MacvlanModeVEPA is the Virtual Ethernet Port Aggregator mode. Data from one macvlan instance to the other on the
+	// same physical interface is transmitted over the physical interface. Either the attached switch needs
+	// to support hairpin mode, or there must be a TCP/IP router forwarding the packets in order to allow
+	// communication. This is the default mode.
+	MacvlanModeVEPA MacvlanMode = "VEPA"
+
+	// MacvlanModePassthru mode gives more power to a single endpoint, usually in macvtap mode.
+	// It is not allowed for more than one endpoint on the same physical interface. All traffic will be forwarded
+	// to this endpoint, allowing virtio guests to change MAC address or set promiscuous mode in order to bridge the
+	// interface or create vlan interfaces on top of it.
+	MacvlanModePassthru MacvlanMode = "Passthru"
+)
+
+// MacvlanConfig consists of arguments specific to the macvlan EgressRouterInterfaceType
+type MacvlanConfig struct {
+	// Mode depicts the mode that is used for the macvlan interface; one of Bridge|Private|VEPA|Passthru. The default mode is "Bridge".
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:Enum="Bridge";"Private";"VEPA";"Passthru"
+	// +kubebuilder:default:="Bridge"
+	Mode MacvlanMode `json:"mode" protobuf:"bytes,1,opt,name=mode,casttype=MacvlanMode"`
+
+	// Name of the master interface. Need not be specified if it can be inferred from the IP address.
+	Master string `json:"master,omitempty" protobuf:"bytes,2,opt,name=master"`
+}
+
+// EgressRouterAddress contains a pair of IP CIDR and gateway to be configured on the router's interface
+// +kubebuilder:validation:Required
+type EgressRouterAddress struct {
+	// IP is the address to configure on the router's interface. Can be IPv4 or IPv6.
+	// +kubebuilder:validation:Required
+	IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"`
+	// IP address of the next-hop gateway, if it cannot be automatically determined. Can be IPv4 or IPv6.
+// EgressRouterAddress contains a pair of an IP CIDR and a gateway to be configured on the router's interface.
+// +kubebuilder:validation:Required
+type EgressRouterAddress struct {
+	// IP is the address to configure on the router's interface. Can be IPv4 or IPv6.
+	// +kubebuilder:validation:Required
+	IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"`
+	// Gateway is the IP address of the next-hop gateway, if it cannot be automatically determined. Can be IPv4 or IPv6.
+	Gateway string `json:"gateway,omitempty" protobuf:"bytes,2,opt,name=gateway"`
+}
+
+// EgressRouterStatusConditionType is an aspect of the router's state.
+type EgressRouterStatusConditionType string
+
+const (
+	// EgressRouterAvailable indicates that the EgressRouter (the associated pod, service, and NAD) is functional and available in the cluster.
+	EgressRouterAvailable EgressRouterStatusConditionType = "Available"
+
+	// EgressRouterProgressing indicates that the router is actively rolling out new code,
+	// propagating config changes, or otherwise moving from one steady state to
+	// another.
+	EgressRouterProgressing EgressRouterStatusConditionType = "Progressing"
+
+	// EgressRouterDegraded indicates that the router's current state does not match its
+	// desired state over a period of time, resulting in a lower quality of service.
+	EgressRouterDegraded EgressRouterStatusConditionType = "Degraded"
+)
+
+// ConditionStatus defines the status of each EgressRouterStatusConditionType.
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
+// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+	ConditionTrue    ConditionStatus = "True"
+	ConditionFalse   ConditionStatus = "False"
+	ConditionUnknown ConditionStatus = "Unknown"
+)
+
+// EgressRouterStatusCondition represents the state of the egress router's
+// managed and monitored components.
+// +k8s:deepcopy-gen=true
+type EgressRouterStatusCondition struct {
+	// Type specifies the aspect reported by this condition; one of Available, Progressing, Degraded.
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:Enum="Available";"Progressing";"Degraded"
+	// +required
+	Type EgressRouterStatusConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=EgressRouterStatusConditionType"`
+
+	// Status of the condition, one of True, False, Unknown.
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:Enum="True";"False";"Unknown"
+	// +required
+	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
+
+	// LastTransitionTime is the time of the last update to the current status property.
+	// +kubebuilder:validation:Required
+	// +required
+	// +nullable
+	LastTransitionTime metav1.Time `json:"lastTransitionTime" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+
+	// Reason is the CamelCase reason for the condition's current status.
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+
+	// Message provides additional information about the current condition.
+	// This is only to be consumed by humans. It may contain Line Feed
+	// characters (U+000A), which should be rendered as new lines.
+	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// EgressRouterStatus contains the observed status of EgressRouter. Read-only.
+type EgressRouterStatus struct {
+	// Observed status of the egress router.
+	// +kubebuilder:validation:Required
+	Conditions []EgressRouterStatusCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"`
+}
+
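+// (Editor's note: illustrative sketch, not part of the vendored upstream file;
+// the reason, message, and timestamp shown are hypothetical.)
+// A router whose pod, service, and NAD are all functional might report:
+//
+//	status:
+//	  conditions:
+//	  - type: Available
+//	    status: "True"
+//	    lastTransitionTime: "2024-06-04T20:43:49Z"
+//	    reason: EgressRouterReady
+//	    message: egress router pod, service and NAD are functional
+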
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EgressRouterList is the list of egress router pods requested.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type EgressRouterList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	Items []EgressRouter `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
diff --git a/vendor/github.com/openshift/api/networkoperator/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/networkoperator/v1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..e58d3dfaa
--- /dev/null
+++ b/vendor/github.com/openshift/api/networkoperator/v1/zz_generated.deepcopy.go
@@ -0,0 +1,224 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressRouter) DeepCopyInto(out *EgressRouter) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressRouter.
+func (in *EgressRouter) DeepCopy() *EgressRouter {
+	if in == nil {
+		return nil
+	}
+	out := new(EgressRouter)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EgressRouter) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressRouterAddress) DeepCopyInto(out *EgressRouterAddress) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressRouterAddress.
+func (in *EgressRouterAddress) DeepCopy() *EgressRouterAddress {
+	if in == nil {
+		return nil
+	}
+	out := new(EgressRouterAddress)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressRouterInterface) DeepCopyInto(out *EgressRouterInterface) {
+	*out = *in
+	out.Macvlan = in.Macvlan
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressRouterInterface.
+func (in *EgressRouterInterface) DeepCopy() *EgressRouterInterface {
+	if in == nil {
+		return nil
+	}
+	out := new(EgressRouterInterface)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EgressRouterList) DeepCopyInto(out *EgressRouterList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]EgressRouter, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressRouterList.
+func (in *EgressRouterList) DeepCopy() *EgressRouterList { + if in == nil { + return nil + } + out := new(EgressRouterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EgressRouterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressRouterSpec) DeepCopyInto(out *EgressRouterSpec) { + *out = *in + if in.Redirect != nil { + in, out := &in.Redirect, &out.Redirect + *out = new(RedirectConfig) + (*in).DeepCopyInto(*out) + } + out.NetworkInterface = in.NetworkInterface + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]EgressRouterAddress, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressRouterSpec. +func (in *EgressRouterSpec) DeepCopy() *EgressRouterSpec { + if in == nil { + return nil + } + out := new(EgressRouterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressRouterStatus) DeepCopyInto(out *EgressRouterStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]EgressRouterStatusCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressRouterStatus. +func (in *EgressRouterStatus) DeepCopy() *EgressRouterStatus { + if in == nil { + return nil + } + out := new(EgressRouterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EgressRouterStatusCondition) DeepCopyInto(out *EgressRouterStatusCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EgressRouterStatusCondition. +func (in *EgressRouterStatusCondition) DeepCopy() *EgressRouterStatusCondition { + if in == nil { + return nil + } + out := new(EgressRouterStatusCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *L4RedirectRule) DeepCopyInto(out *L4RedirectRule) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new L4RedirectRule. +func (in *L4RedirectRule) DeepCopy() *L4RedirectRule { + if in == nil { + return nil + } + out := new(L4RedirectRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MacvlanConfig) DeepCopyInto(out *MacvlanConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MacvlanConfig. +func (in *MacvlanConfig) DeepCopy() *MacvlanConfig { + if in == nil { + return nil + } + out := new(MacvlanConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *RedirectConfig) DeepCopyInto(out *RedirectConfig) { + *out = *in + if in.RedirectRules != nil { + in, out := &in.RedirectRules, &out.RedirectRules + *out = make([]L4RedirectRule, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectConfig. +func (in *RedirectConfig) DeepCopy() *RedirectConfig { + if in == nil { + return nil + } + out := new(RedirectConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/networkoperator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/networkoperator/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..97bec9e29 --- /dev/null +++ b/vendor/github.com/openshift/api/networkoperator/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,119 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_EgressRouter = map[string]string{ + "": "EgressRouter is a feature allowing the user to define an egress router that acts as a bridge between pods and external systems. The egress router runs a service that redirects egress traffic originating from a pod or a group of pods to a remote external system or multiple destinations as per configuration.\n\nIt is consumed by the cluster-network-operator. More specifically, given an EgressRouter CR with , the CNO will create and manage: - A service called - An egress pod called - A NAD called \n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).\n\nEgressRouter is a single egressrouter pod configuration object.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired egress router.", + "status": "Observed status of EgressRouter.", +} + +func (EgressRouter) SwaggerDoc() map[string]string { + return map_EgressRouter +} + +var map_EgressRouterAddress = map[string]string{ + "": "EgressRouterAddress contains a pair of IP CIDR and gateway to be configured on the router's interface", + "ip": "IP is the address to configure on the router's interface. Can be IPv4 or IPv6.", + "gateway": "IP address of the next-hop gateway, if it cannot be automatically determined. 
Can be IPv4 or IPv6.", +} + +func (EgressRouterAddress) SwaggerDoc() map[string]string { + return map_EgressRouterAddress +} + +var map_EgressRouterInterface = map[string]string{ + "": "EgressRouterInterface contains the configuration of interface to create/use.", + "macvlan": "Arguments specific to the interfaceType macvlan", +} + +func (EgressRouterInterface) SwaggerDoc() map[string]string { + return map_EgressRouterInterface +} + +var map_EgressRouterList = map[string]string{ + "": "EgressRouterList is the list of egress router pods requested.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (EgressRouterList) SwaggerDoc() map[string]string { + return map_EgressRouterList +} + +var map_EgressRouterSpec = map[string]string{ + "": "EgressRouterSpec contains the configuration for an egress router. Mode, networkInterface and addresses fields must be specified along with exactly one \"Config\" that matches the mode. Each config consists of parameters specific to that mode.", + "mode": "Mode depicts the mode that is used for the egress router. The default mode is \"Redirect\" and is the only supported mode currently.", + "redirect": "Redirect represents the configuration parameters specific to redirect mode.", + "networkInterface": "Specification of interface to create/use. The default is macvlan. Currently only macvlan is supported.", + "addresses": "List of IP addresses to configure on the pod's secondary interface.", +} + +func (EgressRouterSpec) SwaggerDoc() map[string]string { + return map_EgressRouterSpec +} + +var map_EgressRouterStatus = map[string]string{ + "": "EgressRouterStatus contains the observed status of EgressRouter. Read-only.", + "conditions": "Observed status of the egress router", +} + +func (EgressRouterStatus) SwaggerDoc() map[string]string { + return map_EgressRouterStatus +} + +var map_EgressRouterStatusCondition = map[string]string{ + "": "EgressRouterStatusCondition represents the state of the egress router's managed and monitored components.", + "type": "Type specifies the aspect reported by this condition; one of Available, Progressing, Degraded", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "LastTransitionTime is the time of the last update to the current status property.", + "reason": "Reason is the CamelCase reason for the condition's current status.", + "message": "Message provides additional information about the current condition. This is only to be consumed by humans. It may contain Line Feed characters (U+000A), which should be rendered as new lines.", +} + +func (EgressRouterStatusCondition) SwaggerDoc() map[string]string { + return map_EgressRouterStatusCondition +} + +var map_L4RedirectRule = map[string]string{ + "": "L4RedirectRule defines a DNAT redirection from a given port to a destination IP and port.", + "destinationIP": "IP specifies the remote destination's IP address. Can be IPv4 or IPv6.", + "port": "Port is the port number to which clients should send traffic to be redirected.", + "protocol": "Protocol can be TCP, SCTP or UDP.", + "targetPort": "TargetPort allows specifying the port number on the remote destination to which the traffic gets redirected to. 
If unspecified, the value from \"Port\" is used.", +} + +func (L4RedirectRule) SwaggerDoc() map[string]string { + return map_L4RedirectRule +} + +var map_MacvlanConfig = map[string]string{ + "": "MacvlanConfig consists of arguments specific to the macvlan EgressRouterInterfaceType", + "mode": "Mode depicts the mode that is used for the macvlan interface; one of Bridge|Private|VEPA|Passthru. The default mode is \"Bridge\".", + "master": "Name of the master interface. Need not be specified if it can be inferred from the IP address.", +} + +func (MacvlanConfig) SwaggerDoc() map[string]string { + return map_MacvlanConfig +} + +var map_RedirectConfig = map[string]string{ + "": "RedirectConfig represents the configuration parameters specific to redirect mode.", + "redirectRules": "List of L4RedirectRules that define the DNAT redirection from the pod to the destination in redirect mode.", + "fallbackIP": "FallbackIP specifies the remote destination's IP address. Can be IPv4 or IPv6. If no redirect rules are specified, all traffic from the router are redirected to this IP. If redirect rules are specified, then any connections on any other port (undefined in the rules) on the router will be redirected to this IP. If redirect rules are specified and no fallback IP is provided, connections on other ports will simply be rejected.", +} + +func (RedirectConfig) SwaggerDoc() map[string]string { + return map_RedirectConfig +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/oauth/.codegen.yaml b/vendor/github.com/openshift/api/oauth/.codegen.yaml new file mode 100644 index 000000000..ffa2c8d9b --- /dev/null +++ b/vendor/github.com/openshift/api/oauth/.codegen.yaml @@ -0,0 +1,2 @@ +swaggerdocs: + commentPolicy: Warn diff --git a/vendor/github.com/openshift/api/oauth/install.go b/vendor/github.com/openshift/api/oauth/install.go new file mode 100644 index 000000000..6bf63539d --- /dev/null +++ b/vendor/github.com/openshift/api/oauth/install.go @@ -0,0 +1,26 @@ +package oauth + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + oauthv1 "github.com/openshift/api/oauth/v1" +) + +const ( + GroupName = "oauth.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(oauthv1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/oauth/v1/doc.go b/vendor/github.com/openshift/api/oauth/v1/doc.go new file mode 100644 index 000000000..cae9e70d4 --- /dev/null +++ b/vendor/github.com/openshift/api/oauth/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/oauth/apis/oauth +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=oauth.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/oauth/v1/generated.pb.go b/vendor/github.com/openshift/api/oauth/v1/generated.pb.go new file mode 100644 index 000000000..a79c46802 --- /dev/null +++ b/vendor/github.com/openshift/api/oauth/v1/generated.pb.go @@ -0,0 +1,4624 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: github.com/openshift/api/oauth/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ClusterRoleScopeRestriction) Reset() { *m = ClusterRoleScopeRestriction{} } +func (*ClusterRoleScopeRestriction) ProtoMessage() {} +func (*ClusterRoleScopeRestriction) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{0} +} +func (m *ClusterRoleScopeRestriction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleScopeRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleScopeRestriction) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleScopeRestriction.Merge(m, src) +} +func (m *ClusterRoleScopeRestriction) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleScopeRestriction) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleScopeRestriction.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleScopeRestriction proto.InternalMessageInfo + +func (m *OAuthAccessToken) Reset() { *m = OAuthAccessToken{} } +func (*OAuthAccessToken) ProtoMessage() {} +func (*OAuthAccessToken) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{1} +} +func (m *OAuthAccessToken) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OAuthAccessToken) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OAuthAccessToken) XXX_Merge(src proto.Message) { + xxx_messageInfo_OAuthAccessToken.Merge(m, src) +} +func (m *OAuthAccessToken) XXX_Size() int { + return m.Size() +} +func (m *OAuthAccessToken) XXX_DiscardUnknown() { + xxx_messageInfo_OAuthAccessToken.DiscardUnknown(m) +} + +var xxx_messageInfo_OAuthAccessToken proto.InternalMessageInfo + +func (m *OAuthAccessTokenList) Reset() { *m = OAuthAccessTokenList{} } +func (*OAuthAccessTokenList) ProtoMessage() {} +func (*OAuthAccessTokenList) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{2} +} +func (m *OAuthAccessTokenList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OAuthAccessTokenList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OAuthAccessTokenList) XXX_Merge(src proto.Message) { + xxx_messageInfo_OAuthAccessTokenList.Merge(m, src) +} +func (m *OAuthAccessTokenList) XXX_Size() int { + return m.Size() +} +func (m *OAuthAccessTokenList) XXX_DiscardUnknown() { + xxx_messageInfo_OAuthAccessTokenList.DiscardUnknown(m) +} + +var xxx_messageInfo_OAuthAccessTokenList proto.InternalMessageInfo + +func (m *OAuthAuthorizeToken) Reset() { *m = OAuthAuthorizeToken{} } +func 
(*OAuthAuthorizeToken) ProtoMessage() {} +func (*OAuthAuthorizeToken) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{3} +} +func (m *OAuthAuthorizeToken) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OAuthAuthorizeToken) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OAuthAuthorizeToken) XXX_Merge(src proto.Message) { + xxx_messageInfo_OAuthAuthorizeToken.Merge(m, src) +} +func (m *OAuthAuthorizeToken) XXX_Size() int { + return m.Size() +} +func (m *OAuthAuthorizeToken) XXX_DiscardUnknown() { + xxx_messageInfo_OAuthAuthorizeToken.DiscardUnknown(m) +} + +var xxx_messageInfo_OAuthAuthorizeToken proto.InternalMessageInfo + +func (m *OAuthAuthorizeTokenList) Reset() { *m = OAuthAuthorizeTokenList{} } +func (*OAuthAuthorizeTokenList) ProtoMessage() {} +func (*OAuthAuthorizeTokenList) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{4} +} +func (m *OAuthAuthorizeTokenList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OAuthAuthorizeTokenList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OAuthAuthorizeTokenList) XXX_Merge(src proto.Message) { + xxx_messageInfo_OAuthAuthorizeTokenList.Merge(m, src) +} +func (m *OAuthAuthorizeTokenList) XXX_Size() int { + return m.Size() +} +func (m *OAuthAuthorizeTokenList) XXX_DiscardUnknown() { + xxx_messageInfo_OAuthAuthorizeTokenList.DiscardUnknown(m) +} + +var xxx_messageInfo_OAuthAuthorizeTokenList proto.InternalMessageInfo + +func (m *OAuthClient) Reset() { *m = OAuthClient{} } +func (*OAuthClient) ProtoMessage() {} +func (*OAuthClient) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{5} +} +func (m *OAuthClient) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OAuthClient) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OAuthClient) XXX_Merge(src proto.Message) { + xxx_messageInfo_OAuthClient.Merge(m, src) +} +func (m *OAuthClient) XXX_Size() int { + return m.Size() +} +func (m *OAuthClient) XXX_DiscardUnknown() { + xxx_messageInfo_OAuthClient.DiscardUnknown(m) +} + +var xxx_messageInfo_OAuthClient proto.InternalMessageInfo + +func (m *OAuthClientAuthorization) Reset() { *m = OAuthClientAuthorization{} } +func (*OAuthClientAuthorization) ProtoMessage() {} +func (*OAuthClientAuthorization) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{6} +} +func (m *OAuthClientAuthorization) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OAuthClientAuthorization) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OAuthClientAuthorization) XXX_Merge(src proto.Message) { + xxx_messageInfo_OAuthClientAuthorization.Merge(m, src) +} +func (m *OAuthClientAuthorization) XXX_Size() int { + return m.Size() +} +func (m *OAuthClientAuthorization) XXX_DiscardUnknown() { + xxx_messageInfo_OAuthClientAuthorization.DiscardUnknown(m) +} + +var xxx_messageInfo_OAuthClientAuthorization proto.InternalMessageInfo + +func (m 
*OAuthClientAuthorizationList) Reset() { *m = OAuthClientAuthorizationList{} } +func (*OAuthClientAuthorizationList) ProtoMessage() {} +func (*OAuthClientAuthorizationList) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{7} +} +func (m *OAuthClientAuthorizationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OAuthClientAuthorizationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OAuthClientAuthorizationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_OAuthClientAuthorizationList.Merge(m, src) +} +func (m *OAuthClientAuthorizationList) XXX_Size() int { + return m.Size() +} +func (m *OAuthClientAuthorizationList) XXX_DiscardUnknown() { + xxx_messageInfo_OAuthClientAuthorizationList.DiscardUnknown(m) +} + +var xxx_messageInfo_OAuthClientAuthorizationList proto.InternalMessageInfo + +func (m *OAuthClientList) Reset() { *m = OAuthClientList{} } +func (*OAuthClientList) ProtoMessage() {} +func (*OAuthClientList) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{8} +} +func (m *OAuthClientList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OAuthClientList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OAuthClientList) XXX_Merge(src proto.Message) { + xxx_messageInfo_OAuthClientList.Merge(m, src) +} +func (m *OAuthClientList) XXX_Size() int { + return m.Size() +} +func (m *OAuthClientList) XXX_DiscardUnknown() { + xxx_messageInfo_OAuthClientList.DiscardUnknown(m) +} + +var xxx_messageInfo_OAuthClientList proto.InternalMessageInfo + +func (m *OAuthRedirectReference) Reset() { *m = OAuthRedirectReference{} } +func (*OAuthRedirectReference) ProtoMessage() {} +func (*OAuthRedirectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{9} +} +func (m *OAuthRedirectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OAuthRedirectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OAuthRedirectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_OAuthRedirectReference.Merge(m, src) +} +func (m *OAuthRedirectReference) XXX_Size() int { + return m.Size() +} +func (m *OAuthRedirectReference) XXX_DiscardUnknown() { + xxx_messageInfo_OAuthRedirectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_OAuthRedirectReference proto.InternalMessageInfo + +func (m *RedirectReference) Reset() { *m = RedirectReference{} } +func (*RedirectReference) ProtoMessage() {} +func (*RedirectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{10} +} +func (m *RedirectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RedirectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RedirectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_RedirectReference.Merge(m, src) +} +func (m *RedirectReference) XXX_Size() int { + return m.Size() +} +func (m *RedirectReference) XXX_DiscardUnknown() { + 
xxx_messageInfo_RedirectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_RedirectReference proto.InternalMessageInfo + +func (m *ScopeRestriction) Reset() { *m = ScopeRestriction{} } +func (*ScopeRestriction) ProtoMessage() {} +func (*ScopeRestriction) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{11} +} +func (m *ScopeRestriction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScopeRestriction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScopeRestriction) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScopeRestriction.Merge(m, src) +} +func (m *ScopeRestriction) XXX_Size() int { + return m.Size() +} +func (m *ScopeRestriction) XXX_DiscardUnknown() { + xxx_messageInfo_ScopeRestriction.DiscardUnknown(m) +} + +var xxx_messageInfo_ScopeRestriction proto.InternalMessageInfo + +func (m *UserOAuthAccessToken) Reset() { *m = UserOAuthAccessToken{} } +func (*UserOAuthAccessToken) ProtoMessage() {} +func (*UserOAuthAccessToken) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{12} +} +func (m *UserOAuthAccessToken) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserOAuthAccessToken) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserOAuthAccessToken) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserOAuthAccessToken.Merge(m, src) +} +func (m *UserOAuthAccessToken) XXX_Size() int { + return m.Size() +} +func (m *UserOAuthAccessToken) XXX_DiscardUnknown() { + xxx_messageInfo_UserOAuthAccessToken.DiscardUnknown(m) +} + +var xxx_messageInfo_UserOAuthAccessToken proto.InternalMessageInfo + +func (m *UserOAuthAccessTokenList) Reset() { *m = UserOAuthAccessTokenList{} } +func (*UserOAuthAccessTokenList) ProtoMessage() {} +func (*UserOAuthAccessTokenList) Descriptor() ([]byte, []int) { + return fileDescriptor_bd688dca7ea39c8a, []int{13} +} +func (m *UserOAuthAccessTokenList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserOAuthAccessTokenList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserOAuthAccessTokenList) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserOAuthAccessTokenList.Merge(m, src) +} +func (m *UserOAuthAccessTokenList) XXX_Size() int { + return m.Size() +} +func (m *UserOAuthAccessTokenList) XXX_DiscardUnknown() { + xxx_messageInfo_UserOAuthAccessTokenList.DiscardUnknown(m) +} + +var xxx_messageInfo_UserOAuthAccessTokenList proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ClusterRoleScopeRestriction)(nil), "github.com.openshift.api.oauth.v1.ClusterRoleScopeRestriction") + proto.RegisterType((*OAuthAccessToken)(nil), "github.com.openshift.api.oauth.v1.OAuthAccessToken") + proto.RegisterType((*OAuthAccessTokenList)(nil), "github.com.openshift.api.oauth.v1.OAuthAccessTokenList") + proto.RegisterType((*OAuthAuthorizeToken)(nil), "github.com.openshift.api.oauth.v1.OAuthAuthorizeToken") + proto.RegisterType((*OAuthAuthorizeTokenList)(nil), "github.com.openshift.api.oauth.v1.OAuthAuthorizeTokenList") + proto.RegisterType((*OAuthClient)(nil), "github.com.openshift.api.oauth.v1.OAuthClient") + 
proto.RegisterType((*OAuthClientAuthorization)(nil), "github.com.openshift.api.oauth.v1.OAuthClientAuthorization") + proto.RegisterType((*OAuthClientAuthorizationList)(nil), "github.com.openshift.api.oauth.v1.OAuthClientAuthorizationList") + proto.RegisterType((*OAuthClientList)(nil), "github.com.openshift.api.oauth.v1.OAuthClientList") + proto.RegisterType((*OAuthRedirectReference)(nil), "github.com.openshift.api.oauth.v1.OAuthRedirectReference") + proto.RegisterType((*RedirectReference)(nil), "github.com.openshift.api.oauth.v1.RedirectReference") + proto.RegisterType((*ScopeRestriction)(nil), "github.com.openshift.api.oauth.v1.ScopeRestriction") + proto.RegisterType((*UserOAuthAccessToken)(nil), "github.com.openshift.api.oauth.v1.UserOAuthAccessToken") + proto.RegisterType((*UserOAuthAccessTokenList)(nil), "github.com.openshift.api.oauth.v1.UserOAuthAccessTokenList") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/oauth/v1/generated.proto", fileDescriptor_bd688dca7ea39c8a) +} + +var fileDescriptor_bd688dca7ea39c8a = []byte{ + // 1272 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x58, 0xcf, 0x6f, 0x1b, 0xc5, + 0x17, 0xcf, 0x36, 0x76, 0x62, 0x3f, 0x37, 0xbf, 0x26, 0x4d, 0xbb, 0xdf, 0xb6, 0x5f, 0xdb, 0x75, + 0x24, 0x1a, 0x04, 0xac, 0x49, 0x28, 0xa5, 0x52, 0xa5, 0x4a, 0x76, 0xa8, 0x4a, 0x04, 0x69, 0xa5, + 0x49, 0x03, 0x15, 0xf4, 0xd0, 0xe9, 0xee, 0x8b, 0x3d, 0x64, 0xbd, 0xbb, 0xec, 0x8c, 0x43, 0x83, + 0x7a, 0xe0, 0xc2, 0x9d, 0x7f, 0x84, 0x0b, 0x77, 0x0e, 0x48, 0x1c, 0x7a, 0x42, 0x3d, 0x20, 0xd4, + 0x93, 0x45, 0x8c, 0x38, 0xf0, 0x2f, 0x70, 0x42, 0x3b, 0xbb, 0xde, 0x1f, 0x8e, 0x4d, 0xdc, 0x03, + 0x11, 0x87, 0xde, 0xbc, 0xef, 0x7d, 0x3e, 0x6f, 0xde, 0xcc, 0xbc, 0xcf, 0x9b, 0x19, 0xc3, 0x7a, + 0x8b, 0xcb, 0x76, 0xf7, 0xb1, 0x61, 0xba, 0x9d, 0xba, 0xeb, 0xa1, 0x23, 0xda, 0x7c, 0x4f, 0xd6, + 0x99, 0xc7, 0xeb, 0x2e, 0xeb, 0xca, 0x76, 0xfd, 0x60, 0xbd, 0xde, 0x42, 0x07, 0x7d, 0x26, 0xd1, + 0x32, 0x3c, 0xdf, 0x95, 0x2e, 0xb9, 0x92, 0x50, 0x8c, 0x98, 0x62, 0x30, 0x8f, 0x1b, 0x8a, 0x62, + 0x1c, 0xac, 0x5f, 0x7c, 0x2b, 0x15, 0xb5, 0xe5, 0xb6, 0xdc, 0xba, 0x62, 0x3e, 0xee, 0xee, 0xa9, + 0x2f, 0xf5, 0xa1, 0x7e, 0x85, 0x11, 0x2f, 0x5e, 0xdb, 0xbf, 0x21, 0x0c, 0xee, 0x06, 0xc3, 0x76, + 0x98, 0xd9, 0xe6, 0x0e, 0xfa, 0x87, 0x75, 0x6f, 0xbf, 0x15, 0x18, 0x44, 0xbd, 0x83, 0x92, 0x8d, + 0xc8, 0xe3, 0xe2, 0xf5, 0x71, 0x2c, 0xbf, 0xeb, 0x48, 0xde, 0xc1, 0xba, 0x30, 0xdb, 0xd8, 0x61, + 0xc3, 0xbc, 0xda, 0x0f, 0x1a, 0x5c, 0xda, 0xb4, 0xbb, 0x42, 0xa2, 0x4f, 0x5d, 0x1b, 0x77, 0x4c, + 0xd7, 0x43, 0x8a, 0x42, 0xfa, 0xdc, 0x94, 0xdc, 0x75, 0xc8, 0x1b, 0x50, 0xf4, 0x5d, 0x1b, 0xef, + 0xb2, 0x0e, 0x0a, 0x5d, 0xab, 0x4e, 0xaf, 0x15, 0x9b, 0x73, 0xfd, 0x5e, 0xa5, 0x48, 0x07, 0x46, + 0x9a, 0xf8, 0x89, 0x01, 0xe0, 0x04, 0x3f, 0x3c, 0x66, 0xa2, 0xd0, 0xcf, 0x28, 0xf4, 0x7c, 0xbf, + 0x57, 0x81, 0xbb, 0xb1, 0x95, 0xa6, 0x10, 0xa4, 0x01, 0x0b, 0xcc, 0xb6, 0xdd, 0x2f, 0x6f, 0x0b, + 0x93, 0xd9, 0x2c, 0x18, 0x4f, 0x9f, 0xae, 0x6a, 0x6b, 0x85, 0xe6, 0x85, 0x67, 0xbd, 0xca, 0x54, + 0xbf, 0x57, 0x59, 0x68, 0x64, 0xdd, 0x74, 0x18, 0x5f, 0xfb, 0x23, 0x07, 0x8b, 0xf7, 0x1a, 0x5d, + 0xd9, 0x6e, 0x98, 0x26, 0x0a, 0x71, 0xdf, 0xdd, 0x47, 0x87, 0x3c, 0x82, 0x42, 0xb0, 0x4e, 0x16, + 0x93, 0x4c, 0xd7, 0xaa, 0xda, 0x5a, 0x69, 0xe3, 0x6d, 0x23, 0x5c, 0x1f, 0x23, 0xbd, 0x3e, 0x86, + 0xb7, 0xdf, 0x0a, 0x0c, 0xc2, 0x08, 0xd0, 0xc6, 0xc1, 0xba, 0x71, 0xef, 0xf1, 0xe7, 0x68, 0xca, + 0x6d, 0x94, 0xac, 0x49, 0xa2, 0x14, 0x20, 0xb1, 0xd1, 0x38, 0x2a, 0xd9, 0x00, 0x30, 0x6d, 
0x8e, + 0x8e, 0x0c, 0x66, 0xa6, 0x9f, 0xa9, 0x6a, 0x6b, 0xc5, 0x84, 0xb1, 0x19, 0x7b, 0x68, 0x0a, 0x45, + 0xea, 0x50, 0xc4, 0x27, 0x1e, 0xf7, 0x51, 0x6c, 0x85, 0xf3, 0x9c, 0x6e, 0x2e, 0x45, 0x94, 0xe2, + 0xed, 0x81, 0x83, 0x26, 0x18, 0x52, 0x83, 0x19, 0x11, 0xec, 0x87, 0xd0, 0x73, 0x6a, 0x29, 0xa1, + 0xdf, 0xab, 0xcc, 0xa8, 0x1d, 0x12, 0x34, 0xf2, 0x90, 0x77, 0xa1, 0xe4, 0xa3, 0xc5, 0x7d, 0x34, + 0xe5, 0x2e, 0xdd, 0xd2, 0xf3, 0x2a, 0x93, 0xe5, 0x28, 0x6c, 0x89, 0x26, 0x2e, 0x9a, 0xc6, 0x91, + 0x37, 0xa1, 0xd0, 0x15, 0xe8, 0xab, 0xec, 0x67, 0x14, 0x67, 0x31, 0xe2, 0x14, 0x76, 0x23, 0x3b, + 0x8d, 0x11, 0xe4, 0x75, 0x98, 0x0d, 0x7e, 0xef, 0x6e, 0xbd, 0xaf, 0xcf, 0x2a, 0xf0, 0x42, 0x04, + 0x9e, 0xdd, 0x0d, 0xcd, 0x74, 0xe0, 0x27, 0xb7, 0x60, 0x3e, 0xa8, 0x7b, 0xd7, 0xe7, 0x5f, 0xa1, + 0xda, 0x0c, 0xbd, 0xa0, 0x18, 0xe7, 0x23, 0xc6, 0x7c, 0x23, 0xe3, 0xa5, 0x43, 0x68, 0x72, 0x03, + 0xce, 0xfa, 0xb8, 0xe7, 0xa3, 0x68, 0x87, 0xec, 0xa2, 0x62, 0x9f, 0x8b, 0xd8, 0x67, 0x69, 0xca, + 0x47, 0x33, 0x48, 0xf2, 0x10, 0x74, 0xee, 0x30, 0x53, 0xf2, 0x03, 0x2e, 0x0f, 0xef, 0xf3, 0x0e, + 0xba, 0x5d, 0xb9, 0x83, 0xa6, 0xeb, 0x58, 0x42, 0x87, 0xaa, 0xb6, 0x96, 0x6f, 0x56, 0xa3, 0x28, + 0xfa, 0xd6, 0x18, 0x1c, 0x1d, 0x1b, 0xa1, 0xf6, 0xb3, 0x06, 0xe7, 0x86, 0xeb, 0xec, 0x23, 0x2e, + 0x24, 0x79, 0x78, 0xac, 0xd6, 0x8c, 0xc9, 0x6a, 0x2d, 0x60, 0xab, 0x4a, 0x8b, 0x57, 0x7e, 0x60, + 0x49, 0xd5, 0xd9, 0x03, 0xc8, 0x73, 0x89, 0x9d, 0x50, 0x4c, 0xa5, 0x8d, 0x77, 0x8c, 0x13, 0xdb, + 0x8d, 0x31, 0x9c, 0x65, 0x73, 0x2e, 0x8a, 0x9f, 0xdf, 0x0a, 0x22, 0xd1, 0x30, 0x60, 0xed, 0xc7, + 0x1c, 0x2c, 0x87, 0xd0, 0xec, 0x06, 0xbc, 0xd2, 0xce, 0x49, 0xda, 0x59, 0x85, 0xbc, 0x90, 0x4c, + 0x0e, 0x84, 0x13, 0x2f, 0xef, 0x4e, 0x60, 0xa4, 0xa1, 0x2f, 0x23, 0xb0, 0xd9, 0x97, 0x11, 0x58, + 0xe1, 0x04, 0x81, 0xdd, 0x84, 0x39, 0xd3, 0xb5, 0x70, 0xb3, 0xcd, 0x6c, 0x1b, 0x9d, 0x16, 0x46, + 0x0a, 0x59, 0x89, 0x08, 0x73, 0x9b, 0x69, 0x27, 0xcd, 0x62, 0xc9, 0x36, 0x2c, 0x67, 0x0c, 0xdb, + 0x28, 0xdb, 0xae, 0xa5, 0xe4, 0x51, 0x6c, 0x5e, 0x8a, 0x42, 0x2c, 0x6f, 0x1e, 0x87, 0xd0, 0x51, + 0xbc, 0xda, 0x2f, 0x1a, 0x5c, 0x18, 0x51, 0x43, 0xa7, 0xa0, 0x8b, 0xcf, 0xb2, 0xba, 0xb8, 0x3e, + 0xb1, 0x2e, 0x32, 0x89, 0x8e, 0x91, 0xc6, 0x37, 0x33, 0x50, 0x52, 0xe8, 0xb0, 0x18, 0x4f, 0x41, + 0x12, 0xaf, 0xc1, 0x8c, 0x40, 0xd3, 0x47, 0x19, 0xc9, 0x61, 0x3e, 0x42, 0xcf, 0xec, 0x28, 0x2b, + 0x8d, 0xbc, 0x64, 0x13, 0x96, 0x98, 0x65, 0xf1, 0xe0, 0xe4, 0x63, 0x76, 0xe8, 0x13, 0xfa, 0xb4, + 0x2a, 0xf0, 0x95, 0x7e, 0xaf, 0xb2, 0xd4, 0x18, 0x76, 0xd2, 0xe3, 0x78, 0xb2, 0x03, 0x2b, 0x3e, + 0x0a, 0xcf, 0x75, 0xac, 0x4f, 0xb8, 0x6c, 0xc7, 0x7b, 0x1a, 0x28, 0x25, 0x38, 0x7b, 0xff, 0x1f, + 0x8d, 0xbd, 0x42, 0x47, 0x81, 0xe8, 0x68, 0x2e, 0xb9, 0x16, 0xf4, 0xed, 0x58, 0x23, 0x42, 0xcf, + 0xab, 0xa4, 0x16, 0xc3, 0x9e, 0x9d, 0xd8, 0x69, 0x06, 0x45, 0xb6, 0xa0, 0xd4, 0xf2, 0x99, 0x23, + 0xa3, 0x3a, 0x0c, 0x05, 0x75, 0x75, 0xa0, 0xc0, 0x3b, 0x89, 0xeb, 0xaf, 0x5e, 0x65, 0x51, 0x7d, + 0x7e, 0xc0, 0x1c, 0xcb, 0x46, 0xff, 0xfe, 0xa1, 0x87, 0x34, 0xcd, 0x25, 0x4f, 0x61, 0x49, 0x0c, + 0x5d, 0x5e, 0x84, 0x3e, 0x3b, 0x71, 0xd7, 0x1c, 0xbe, 0xf8, 0x34, 0xff, 0x17, 0x65, 0xb1, 0x34, + 0xec, 0x11, 0xf4, 0xf8, 0x40, 0xe4, 0x01, 0xe8, 0x2c, 0x69, 0xb9, 0xdb, 0xec, 0x49, 0xa3, 0x85, + 0x83, 0xc3, 0xa7, 0xa0, 0x0e, 0x9f, 0xcb, 0xc1, 0xc1, 0xd3, 0x18, 0x83, 0xa1, 0x63, 0xd9, 0xe4, + 0x10, 0x56, 0x53, 0xbe, 0x71, 0x27, 0x97, 0xea, 0x02, 0xf9, 0xe6, 0xd5, 0x7e, 0xaf, 0xb2, 0xda, + 0x38, 0x19, 0x4e, 0x27, 0x89, 0x59, 0xfb, 0xee, 0x0c, 0xe8, 0x29, 0x1d, 0x0c, 0xb4, 0xa3, 0x2e, + 0x5e, 0xff, 0xd1, 
0x73, 0x22, 0xdd, 0x76, 0xa7, 0x5f, 0xa6, 0xed, 0xe6, 0x4e, 0x68, 0xbb, 0xc9, + 0x79, 0x92, 0x1f, 0x77, 0x9e, 0xd4, 0x7a, 0x1a, 0x5c, 0x1e, 0xb7, 0x5e, 0xa7, 0xd0, 0x13, 0x1f, + 0x65, 0x7b, 0xe2, 0xcd, 0x49, 0x7b, 0xe2, 0x88, 0x6c, 0xc7, 0x34, 0xc6, 0x9f, 0x34, 0x58, 0x48, + 0x51, 0x4e, 0x61, 0x4e, 0x3b, 0xd9, 0x39, 0x19, 0x2f, 0x37, 0xa7, 0x31, 0xd3, 0x38, 0xd2, 0xe0, + 0xbc, 0x42, 0x0d, 0x3a, 0x13, 0xc5, 0x3d, 0xf4, 0xd1, 0x31, 0xf1, 0x14, 0xaa, 0x1a, 0xa1, 0xe8, + 0x0f, 0x86, 0x53, 0x45, 0x5d, 0xda, 0xb8, 0x36, 0xc1, 0xac, 0x8e, 0xa5, 0x9a, 0xdc, 0x7f, 0x62, + 0x13, 0x4d, 0x22, 0xd7, 0x9e, 0xc2, 0xd2, 0xf1, 0xd9, 0xad, 0x42, 0xbe, 0xe5, 0xbb, 0x5d, 0x4f, + 0x4d, 0x2d, 0x75, 0x73, 0xb9, 0x13, 0x18, 0x69, 0xe8, 0x23, 0x55, 0xc8, 0xed, 0x73, 0xc7, 0x8a, + 0x04, 0x77, 0x36, 0xc2, 0xe4, 0x3e, 0xe4, 0x8e, 0x45, 0x95, 0x27, 0x40, 0x38, 0x89, 0xc0, 0x62, + 0x84, 0x12, 0x97, 0xf2, 0xd4, 0xbe, 0xd7, 0x60, 0x71, 0xc4, 0x53, 0xb2, 0x60, 0x73, 0x89, 0x3e, + 0xb3, 0x07, 0x2f, 0xc9, 0x85, 0xa0, 0xcb, 0xdf, 0x7e, 0xc2, 0x4c, 0xf9, 0x31, 0xb3, 0xbb, 0x28, + 0x68, 0x0c, 0x20, 0x5f, 0x40, 0xc9, 0x4c, 0x9e, 0xa5, 0xd1, 0x42, 0xdd, 0x9a, 0x60, 0xa1, 0xfe, + 0xe1, 0x31, 0x1b, 0x8e, 0x97, 0x02, 0xd0, 0xf4, 0x18, 0xb5, 0x3f, 0x73, 0x70, 0x2e, 0xd0, 0xfd, + 0xab, 0xe7, 0xe4, 0xab, 0xe7, 0xe4, 0xbf, 0xfd, 0x9c, 0xfc, 0x55, 0x03, 0x7d, 0x54, 0xad, 0x9d, + 0x42, 0x4b, 0x7d, 0x98, 0x6d, 0xa9, 0xef, 0x4d, 0xa0, 0xa9, 0x51, 0x99, 0x8e, 0xee, 0xad, 0xcd, + 0x3b, 0xcf, 0x8e, 0xca, 0x53, 0xcf, 0x8f, 0xca, 0x53, 0x2f, 0x8e, 0xca, 0x53, 0x5f, 0xf7, 0xcb, + 0xda, 0xb3, 0x7e, 0x59, 0x7b, 0xde, 0x2f, 0x6b, 0x2f, 0xfa, 0x65, 0xed, 0xb7, 0x7e, 0x59, 0xfb, + 0xf6, 0xf7, 0xf2, 0xd4, 0xa7, 0x57, 0x4e, 0xfc, 0xa3, 0xed, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, + 0xc6, 0xcf, 0x36, 0xd6, 0x8c, 0x13, 0x00, 0x00, +} + +func (m *ClusterRoleScopeRestriction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterRoleScopeRestriction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleScopeRestriction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.AllowEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + if len(m.Namespaces) > 0 { + for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Namespaces[iNdEx]) + copy(dAtA[i:], m.Namespaces[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.RoleNames) > 0 { + for iNdEx := len(m.RoleNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RoleNames[iNdEx]) + copy(dAtA[i:], m.RoleNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RoleNames[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *OAuthAccessToken) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuthAccessToken) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuthAccessToken) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.InactivityTimeoutSeconds)) + i-- + dAtA[i] = 0x50 + i -= 
len(m.RefreshToken) + copy(dAtA[i:], m.RefreshToken) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RefreshToken))) + i-- + dAtA[i] = 0x4a + i -= len(m.AuthorizeToken) + copy(dAtA[i:], m.AuthorizeToken) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AuthorizeToken))) + i-- + dAtA[i] = 0x42 + i -= len(m.UserUID) + copy(dAtA[i:], m.UserUID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserUID))) + i-- + dAtA[i] = 0x3a + i -= len(m.UserName) + copy(dAtA[i:], m.UserName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserName))) + i-- + dAtA[i] = 0x32 + i -= len(m.RedirectURI) + copy(dAtA[i:], m.RedirectURI) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RedirectURI))) + i-- + dAtA[i] = 0x2a + if len(m.Scopes) > 0 { + for iNdEx := len(m.Scopes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Scopes[iNdEx]) + copy(dAtA[i:], m.Scopes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scopes[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ExpiresIn)) + i-- + dAtA[i] = 0x18 + i -= len(m.ClientName) + copy(dAtA[i:], m.ClientName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OAuthAccessTokenList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuthAccessTokenList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuthAccessTokenList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OAuthAuthorizeToken) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuthAuthorizeToken) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuthAuthorizeToken) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.CodeChallengeMethod) + copy(dAtA[i:], m.CodeChallengeMethod) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CodeChallengeMethod))) + i-- + dAtA[i] = 0x52 + i -= len(m.CodeChallenge) + copy(dAtA[i:], m.CodeChallenge) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CodeChallenge))) + i-- + dAtA[i] = 0x4a + i -= len(m.UserUID) + copy(dAtA[i:], m.UserUID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserUID))) + i-- + dAtA[i] = 0x42 + i -= len(m.UserName) + copy(dAtA[i:], m.UserName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserName))) + i-- + dAtA[i] = 0x3a + i -= len(m.State) + copy(dAtA[i:], m.State) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x32 + i -= len(m.RedirectURI) + copy(dAtA[i:], m.RedirectURI) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RedirectURI))) + i-- + dAtA[i] = 0x2a + if len(m.Scopes) > 0 { + for iNdEx := len(m.Scopes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Scopes[iNdEx]) + copy(dAtA[i:], m.Scopes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scopes[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ExpiresIn)) + i-- + dAtA[i] = 0x18 + i -= len(m.ClientName) + copy(dAtA[i:], m.ClientName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OAuthAuthorizeTokenList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuthAuthorizeTokenList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuthAuthorizeTokenList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OAuthClient) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuthClient) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuthClient) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AccessTokenInactivityTimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.AccessTokenInactivityTimeoutSeconds)) + i-- + dAtA[i] = 0x48 + } + if m.AccessTokenMaxAgeSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.AccessTokenMaxAgeSeconds)) + i-- + dAtA[i] = 0x40 + } + if len(m.ScopeRestrictions) > 0 { + for iNdEx := len(m.ScopeRestrictions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ScopeRestrictions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + i -= len(m.GrantMethod) + copy(dAtA[i:], m.GrantMethod) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GrantMethod))) + i-- + dAtA[i] = 0x32 + if len(m.RedirectURIs) > 0 { + for iNdEx := len(m.RedirectURIs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RedirectURIs[iNdEx]) + copy(dAtA[i:], m.RedirectURIs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RedirectURIs[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + i-- + if m.RespondWithChallenges { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + if 
len(m.AdditionalSecrets) > 0 { + for iNdEx := len(m.AdditionalSecrets) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdditionalSecrets[iNdEx]) + copy(dAtA[i:], m.AdditionalSecrets[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdditionalSecrets[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Secret) + copy(dAtA[i:], m.Secret) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Secret))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OAuthClientAuthorization) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuthClientAuthorization) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuthClientAuthorization) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Scopes) > 0 { + for iNdEx := len(m.Scopes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Scopes[iNdEx]) + copy(dAtA[i:], m.Scopes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scopes[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + i -= len(m.UserUID) + copy(dAtA[i:], m.UserUID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserUID))) + i-- + dAtA[i] = 0x22 + i -= len(m.UserName) + copy(dAtA[i:], m.UserName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserName))) + i-- + dAtA[i] = 0x1a + i -= len(m.ClientName) + copy(dAtA[i:], m.ClientName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OAuthClientAuthorizationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuthClientAuthorizationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuthClientAuthorizationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OAuthClientList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuthClientList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuthClientList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) 
> 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OAuthRedirectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OAuthRedirectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OAuthRedirectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Reference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RedirectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RedirectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RedirectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ScopeRestriction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopeRestriction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScopeRestriction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ClusterRole != nil { + { + size, err := m.ClusterRole.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.ExactValues) > 0 { + for iNdEx := len(m.ExactValues) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExactValues[iNdEx]) + copy(dAtA[i:], m.ExactValues[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExactValues[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *UserOAuthAccessToken) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*UserOAuthAccessToken) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserOAuthAccessToken) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.InactivityTimeoutSeconds)) + i-- + dAtA[i] = 0x50 + i -= len(m.RefreshToken) + copy(dAtA[i:], m.RefreshToken) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RefreshToken))) + i-- + dAtA[i] = 0x4a + i -= len(m.AuthorizeToken) + copy(dAtA[i:], m.AuthorizeToken) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AuthorizeToken))) + i-- + dAtA[i] = 0x42 + i -= len(m.UserUID) + copy(dAtA[i:], m.UserUID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserUID))) + i-- + dAtA[i] = 0x3a + i -= len(m.UserName) + copy(dAtA[i:], m.UserName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UserName))) + i-- + dAtA[i] = 0x32 + i -= len(m.RedirectURI) + copy(dAtA[i:], m.RedirectURI) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RedirectURI))) + i-- + dAtA[i] = 0x2a + if len(m.Scopes) > 0 { + for iNdEx := len(m.Scopes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Scopes[iNdEx]) + copy(dAtA[i:], m.Scopes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scopes[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ExpiresIn)) + i-- + dAtA[i] = 0x18 + i -= len(m.ClientName) + copy(dAtA[i:], m.ClientName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *UserOAuthAccessTokenList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserOAuthAccessTokenList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserOAuthAccessTokenList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ClusterRoleScopeRestriction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RoleNames) > 0 { + for _, s := range m.RoleNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + return n +} + +func (m *OAuthAccessToken) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = 
len(m.ClientName) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.ExpiresIn)) + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RedirectURI) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UserName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UserUID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.AuthorizeToken) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RefreshToken) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.InactivityTimeoutSeconds)) + return n +} + +func (m *OAuthAccessTokenList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *OAuthAuthorizeToken) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ClientName) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.ExpiresIn)) + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RedirectURI) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.State) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UserName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UserUID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.CodeChallenge) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.CodeChallengeMethod) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *OAuthAuthorizeTokenList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *OAuthClient) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Secret) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AdditionalSecrets) > 0 { + for _, s := range m.AdditionalSecrets { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + if len(m.RedirectURIs) > 0 { + for _, s := range m.RedirectURIs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.GrantMethod) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ScopeRestrictions) > 0 { + for _, e := range m.ScopeRestrictions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AccessTokenMaxAgeSeconds != nil { + n += 1 + sovGenerated(uint64(*m.AccessTokenMaxAgeSeconds)) + } + if m.AccessTokenInactivityTimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.AccessTokenInactivityTimeoutSeconds)) + } + return n +} + +func (m *OAuthClientAuthorization) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ClientName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UserName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UserUID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *OAuthClientAuthorizationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n 
+= 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *OAuthClientList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *OAuthRedirectReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Reference.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RedirectReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScopeRestriction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ExactValues) > 0 { + for _, s := range m.ExactValues { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ClusterRole != nil { + l = m.ClusterRole.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *UserOAuthAccessToken) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ClientName) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.ExpiresIn)) + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RedirectURI) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UserName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UserUID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.AuthorizeToken) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RefreshToken) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.InactivityTimeoutSeconds)) + return n +} + +func (m *UserOAuthAccessTokenList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ClusterRoleScopeRestriction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterRoleScopeRestriction{`, + `RoleNames:` + fmt.Sprintf("%v", this.RoleNames) + `,`, + `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, + `AllowEscalation:` + fmt.Sprintf("%v", this.AllowEscalation) + `,`, + `}`, + }, "") + return s +} +func (this *OAuthAccessToken) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OAuthAccessToken{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ClientName:` + fmt.Sprintf("%v", this.ClientName) + `,`, + `ExpiresIn:` + fmt.Sprintf("%v", this.ExpiresIn) + `,`, + `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, + `RedirectURI:` + fmt.Sprintf("%v", this.RedirectURI) + `,`, + `UserName:` + fmt.Sprintf("%v", this.UserName) + `,`, + `UserUID:` 
+ fmt.Sprintf("%v", this.UserUID) + `,`, + `AuthorizeToken:` + fmt.Sprintf("%v", this.AuthorizeToken) + `,`, + `RefreshToken:` + fmt.Sprintf("%v", this.RefreshToken) + `,`, + `InactivityTimeoutSeconds:` + fmt.Sprintf("%v", this.InactivityTimeoutSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *OAuthAccessTokenList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]OAuthAccessToken{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "OAuthAccessToken", "OAuthAccessToken", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&OAuthAccessTokenList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *OAuthAuthorizeToken) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OAuthAuthorizeToken{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ClientName:` + fmt.Sprintf("%v", this.ClientName) + `,`, + `ExpiresIn:` + fmt.Sprintf("%v", this.ExpiresIn) + `,`, + `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, + `RedirectURI:` + fmt.Sprintf("%v", this.RedirectURI) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `UserName:` + fmt.Sprintf("%v", this.UserName) + `,`, + `UserUID:` + fmt.Sprintf("%v", this.UserUID) + `,`, + `CodeChallenge:` + fmt.Sprintf("%v", this.CodeChallenge) + `,`, + `CodeChallengeMethod:` + fmt.Sprintf("%v", this.CodeChallengeMethod) + `,`, + `}`, + }, "") + return s +} +func (this *OAuthAuthorizeTokenList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]OAuthAuthorizeToken{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "OAuthAuthorizeToken", "OAuthAuthorizeToken", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&OAuthAuthorizeTokenList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *OAuthClient) String() string { + if this == nil { + return "nil" + } + repeatedStringForScopeRestrictions := "[]ScopeRestriction{" + for _, f := range this.ScopeRestrictions { + repeatedStringForScopeRestrictions += strings.Replace(strings.Replace(f.String(), "ScopeRestriction", "ScopeRestriction", 1), `&`, ``, 1) + "," + } + repeatedStringForScopeRestrictions += "}" + s := strings.Join([]string{`&OAuthClient{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Secret:` + fmt.Sprintf("%v", this.Secret) + `,`, + `AdditionalSecrets:` + fmt.Sprintf("%v", this.AdditionalSecrets) + `,`, + `RespondWithChallenges:` + fmt.Sprintf("%v", this.RespondWithChallenges) + `,`, + `RedirectURIs:` + fmt.Sprintf("%v", this.RedirectURIs) + `,`, + `GrantMethod:` + fmt.Sprintf("%v", this.GrantMethod) + `,`, + `ScopeRestrictions:` + repeatedStringForScopeRestrictions + `,`, + `AccessTokenMaxAgeSeconds:` + valueToStringGenerated(this.AccessTokenMaxAgeSeconds) + `,`, + `AccessTokenInactivityTimeoutSeconds:` + valueToStringGenerated(this.AccessTokenInactivityTimeoutSeconds) + `,`, + `}`, + }, 
"") + return s +} +func (this *OAuthClientAuthorization) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OAuthClientAuthorization{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ClientName:` + fmt.Sprintf("%v", this.ClientName) + `,`, + `UserName:` + fmt.Sprintf("%v", this.UserName) + `,`, + `UserUID:` + fmt.Sprintf("%v", this.UserUID) + `,`, + `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, + `}`, + }, "") + return s +} +func (this *OAuthClientAuthorizationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]OAuthClientAuthorization{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "OAuthClientAuthorization", "OAuthClientAuthorization", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&OAuthClientAuthorizationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *OAuthClientList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]OAuthClient{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "OAuthClient", "OAuthClient", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&OAuthClientList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *OAuthRedirectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OAuthRedirectReference{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Reference:` + strings.Replace(strings.Replace(this.Reference.String(), "RedirectReference", "RedirectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RedirectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RedirectReference{`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *ScopeRestriction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScopeRestriction{`, + `ExactValues:` + fmt.Sprintf("%v", this.ExactValues) + `,`, + `ClusterRole:` + strings.Replace(this.ClusterRole.String(), "ClusterRoleScopeRestriction", "ClusterRoleScopeRestriction", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UserOAuthAccessToken) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UserOAuthAccessToken{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ClientName:` + fmt.Sprintf("%v", this.ClientName) + `,`, + `ExpiresIn:` + fmt.Sprintf("%v", this.ExpiresIn) + `,`, + `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, + `RedirectURI:` + fmt.Sprintf("%v", this.RedirectURI) + `,`, + `UserName:` + fmt.Sprintf("%v", this.UserName) + `,`, + `UserUID:` + fmt.Sprintf("%v", this.UserUID) + `,`, 
+ `AuthorizeToken:` + fmt.Sprintf("%v", this.AuthorizeToken) + `,`, + `RefreshToken:` + fmt.Sprintf("%v", this.RefreshToken) + `,`, + `InactivityTimeoutSeconds:` + fmt.Sprintf("%v", this.InactivityTimeoutSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *UserOAuthAccessTokenList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]UserOAuthAccessToken{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "UserOAuthAccessToken", "UserOAuthAccessToken", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&UserOAuthAccessTokenList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ClusterRoleScopeRestriction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleScopeRestriction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleScopeRestriction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RoleNames = append(m.RoleNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowEscalation", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowEscalation = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OAuthAccessToken) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuthAccessToken: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuthAccessToken: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpiresIn", wireType) + } + m.ExpiresIn = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExpiresIn |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scopes = append(m.Scopes, 
string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RedirectURI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RedirectURI = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserUID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthorizeToken", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AuthorizeToken = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RefreshToken", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RefreshToken = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 
10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InactivityTimeoutSeconds", wireType) + } + m.InactivityTimeoutSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InactivityTimeoutSeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OAuthAccessTokenList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuthAccessTokenList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuthAccessTokenList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, OAuthAccessToken{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OAuthAuthorizeToken) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuthAuthorizeToken: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuthAuthorizeToken: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpiresIn", wireType) + } + m.ExpiresIn = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExpiresIn |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scopes = append(m.Scopes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RedirectURI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RedirectURI = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 
6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserUID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CodeChallenge", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CodeChallenge = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CodeChallengeMethod", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CodeChallengeMethod = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OAuthAuthorizeTokenList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuthAuthorizeTokenList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuthAuthorizeTokenList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, OAuthAuthorizeToken{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OAuthClient) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuthClient: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuthClient: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secret = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdditionalSecrets", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdditionalSecrets = append(m.AdditionalSecrets, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RespondWithChallenges", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RespondWithChallenges = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RedirectURIs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RedirectURIs = append(m.RedirectURIs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GrantMethod", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GrantMethod = GrantHandlerType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScopeRestrictions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ScopeRestrictions = append(m.ScopeRestrictions, ScopeRestriction{}) + if err := m.ScopeRestrictions[len(m.ScopeRestrictions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessTokenMaxAgeSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AccessTokenMaxAgeSeconds = &v + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessTokenInactivityTimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AccessTokenInactivityTimeoutSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OAuthClientAuthorization) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuthClientAuthorization: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuthClientAuthorization: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + 
if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserUID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scopes = append(m.Scopes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OAuthClientAuthorizationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + 
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuthClientAuthorizationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuthClientAuthorizationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, OAuthClientAuthorization{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OAuthClientList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuthClientList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuthClientList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, OAuthClient{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OAuthRedirectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OAuthRedirectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OAuthRedirectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Reference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *RedirectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RedirectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RedirectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScopeRestriction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 
3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScopeRestriction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScopeRestriction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExactValues", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExactValues = append(m.ExactValues, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterRole", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClusterRole == nil { + m.ClusterRole = &ClusterRoleScopeRestriction{} + } + if err := m.ClusterRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserOAuthAccessToken) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserOAuthAccessToken: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserOAuthAccessToken: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ClientName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpiresIn", wireType) + } + m.ExpiresIn = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExpiresIn |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scopes = append(m.Scopes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RedirectURI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RedirectURI = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UserUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) 
+ if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UserUID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuthorizeToken", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AuthorizeToken = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RefreshToken", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RefreshToken = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InactivityTimeoutSeconds", wireType) + } + m.InactivityTimeoutSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InactivityTimeoutSeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserOAuthAccessTokenList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserOAuthAccessTokenList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserOAuthAccessTokenList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, UserOAuthAccessToken{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/openshift/api/oauth/v1/generated.proto b/vendor/github.com/openshift/api/oauth/v1/generated.proto new file mode 100644 index 000000000..829025a83 --- /dev/null +++ b/vendor/github.com/openshift/api/oauth/v1/generated.proto @@ -0,0 +1,321 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
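The generated Unmarshal methods above avoid reflection by inlining the protobuf wire format directly: at every field boundary they read a base-128 varint, split it into a field number (wire >> 3) and a wire type (wire & 0x7), and dispatch on the result; skipGenerated walks unknown fields the same way. A minimal standalone sketch of that decoding step, assuming nothing beyond the wire format itself:

package main

import (
	"errors"
	"fmt"
)

// readVarint decodes one base-128 varint starting at index, mirroring the
// loop the generated Unmarshal methods inline at every field boundary.
func readVarint(data []byte, index int) (value uint64, next int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows uint64")
		}
		if index >= len(data) {
			return 0, 0, errors.New("unexpected end of input")
		}
		b := data[index]
		index++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: this was the final byte
			return value, index, nil
		}
	}
}

func main() {
	// 0x12 encodes the tag for field 2 with wire type 2 (length-delimited),
	// i.e. (2 << 3) | 2, the shape every case statement above switches on.
	wire, _, err := readVarint([]byte{0x12}, 0)
	if err != nil {
		panic(err)
	}
	fmt.Printf("fieldNum=%d wireType=%d\n", wire>>3, wire&0x7)
}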
+ +syntax = "proto2"; + +package github.com.openshift.api.oauth.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/openshift/api/oauth/v1"; + +// ClusterRoleScopeRestriction describes restrictions on cluster role scopes +message ClusterRoleScopeRestriction { + // RoleNames is the list of cluster roles that can referenced. * means anything + repeated string roleNames = 1; + + // Namespaces is the list of namespaces that can be referenced. * means any of them (including *) + repeated string namespaces = 2; + + // AllowEscalation indicates whether you can request roles and their escalating resources + optional bool allowEscalation = 3; +} + +// OAuthAccessToken describes an OAuth access token. +// The name of a token must be prefixed with a `sha256~` string, must not contain "/" or "%" characters and must be at +// least 32 characters long. +// +// The name of the token is constructed from the actual token by sha256-hashing it and using URL-safe unpadded +// base64-encoding (as described in RFC4648) on the hashed result. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message OAuthAccessToken { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // ClientName references the client that created this token. + optional string clientName = 2; + + // ExpiresIn is the seconds from CreationTime before this token expires. + optional int64 expiresIn = 3; + + // Scopes is an array of the requested scopes. + repeated string scopes = 4; + + // RedirectURI is the redirection associated with the token. + optional string redirectURI = 5; + + // UserName is the user name associated with this token + optional string userName = 6; + + // UserUID is the unique UID associated with this token + optional string userUID = 7; + + // AuthorizeToken contains the token that authorized this token + optional string authorizeToken = 8; + + // RefreshToken is the value by which this token can be renewed. Can be blank. + optional string refreshToken = 9; + + // InactivityTimeoutSeconds is the value in seconds, from the + // CreationTimestamp, after which this token can no longer be used. + // The value is automatically incremented when the token is used. + optional int32 inactivityTimeoutSeconds = 10; +} + +// OAuthAccessTokenList is a collection of OAuth access tokens +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message OAuthAccessTokenList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of OAuth access tokens + repeated OAuthAccessToken items = 2; +} + +// OAuthAuthorizeToken describes an OAuth authorization token +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
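The OAuthAccessToken doc comment above pins down how token object names are derived from the raw token. A short sketch of that construction; the input token is hypothetical, and since the comment does not say whether a pre-existing "sha256~" prefix is stripped before hashing, the helper hashes its input as given:

package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

// tokenObjectName sketches the naming rule described above: sha256-hash the
// token, encode the digest with URL-safe unpadded base64 (RFC 4648), and
// prepend the "sha256~" prefix. The 43-character digest encoding satisfies
// the documented minimum length and contains no "/" or "%" characters.
func tokenObjectName(token string) string {
	sum := sha256.Sum256([]byte(token))
	return "sha256~" + base64.RawURLEncoding.EncodeToString(sum[:])
}

func main() {
	// Hypothetical opaque bearer token, not a real credential.
	fmt.Println(tokenObjectName("opaque-bearer-token-value"))
}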
+// +openshift:compatibility-gen:level=1 +message OAuthAuthorizeToken { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // ClientName references the client that created this token. + optional string clientName = 2; + + // ExpiresIn is the seconds from CreationTime before this token expires. + optional int64 expiresIn = 3; + + // Scopes is an array of the requested scopes. + repeated string scopes = 4; + + // RedirectURI is the redirection associated with the token. + optional string redirectURI = 5; + + // State data from request + optional string state = 6; + + // UserName is the user name associated with this token + optional string userName = 7; + + // UserUID is the unique UID associated with this token. UserUID and UserName must both match + // for this token to be valid. + optional string userUID = 8; + + // CodeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636 + optional string codeChallenge = 9; + + // CodeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636 + optional string codeChallengeMethod = 10; +} + +// OAuthAuthorizeTokenList is a collection of OAuth authorization tokens +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message OAuthAuthorizeTokenList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of OAuth authorization tokens + repeated OAuthAuthorizeToken items = 2; +} + +// OAuthClient describes an OAuth client +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message OAuthClient { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Secret is the unique secret associated with a client + optional string secret = 2; + + // AdditionalSecrets holds other secrets that may be used to identify the client. This is useful for rotation + // and for service account token validation + repeated string additionalSecrets = 3; + + // RespondWithChallenges indicates whether the client wants authentication needed responses made in the form of challenges instead of redirects + optional bool respondWithChallenges = 4; + + // RedirectURIs is the valid redirection URIs associated with a client + // +patchStrategy=merge + repeated string redirectURIs = 5; + + // GrantMethod is a required field which determines how to handle grants for this client. + // Valid grant handling methods are: + // - auto: always approves grant requests, useful for trusted clients + // - prompt: prompts the end user for approval of grant requests, useful for third-party clients + optional string grantMethod = 6; + + // ScopeRestrictions describes which scopes this client can request. Each requested scope + // is checked against each restriction. 
If any restriction matches, then the scope is allowed. + // If no restriction matches, then the scope is denied. + repeated ScopeRestriction scopeRestrictions = 7; + + // AccessTokenMaxAgeSeconds overrides the default access token max age for tokens granted to this client. + // 0 means no expiration. + optional int32 accessTokenMaxAgeSeconds = 8; + + // AccessTokenInactivityTimeoutSeconds overrides the default token + // inactivity timeout for tokens granted to this client. + // The value represents the maximum amount of time that can occur between + // consecutive uses of the token. Tokens become invalid if they are not + // used within this temporal window. The user will need to acquire a new + // token to regain access once a token times out. + // This value needs to be set only if the default set in configuration is + // not appropriate for this client. Valid values are: + // - 0: Tokens for this client never time out + // - X: Tokens time out if there is no activity for X seconds + // The current minimum allowed value for X is 300 (5 minutes) + // + // WARNING: existing tokens' timeout will not be affected (lowered) by changing this value + optional int32 accessTokenInactivityTimeoutSeconds = 9; +} + +// OAuthClientAuthorization describes an authorization created by an OAuth client +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message OAuthClientAuthorization { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // ClientName references the client that created this authorization + optional string clientName = 2; + + // UserName is the user name that authorized this client + optional string userName = 3; + + // UserUID is the unique UID associated with this authorization. UserUID and UserName + // must both match for this authorization to be valid. + optional string userUID = 4; + + // Scopes is an array of the granted scopes. + repeated string scopes = 5; +} + +// OAuthClientAuthorizationList is a collection of OAuth client authorizations +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message OAuthClientAuthorizationList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of OAuth client authorizations + repeated OAuthClientAuthorization items = 2; +} + +// OAuthClientList is a collection of OAuth clients +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message OAuthClientList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of OAuth clients + repeated OAuthClient items = 2; +} + +// OAuthRedirectReference is a reference to an OAuth redirect object. 
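For orientation, here is how a client matching the fields documented above could be populated using the vendored Go types from this package; the name, secret, and redirect URI are placeholders, not values taken from this patch:

package main

import (
	"fmt"

	oauthv1 "github.com/openshift/api/oauth/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	maxAge := int32(86400) // tokens for this client expire after one day

	client := oauthv1.OAuthClient{
		// OAuthClient is cluster-scoped, so only a name is set here.
		ObjectMeta:  metav1.ObjectMeta{Name: "demo-client"},
		Secret:      "placeholder-secret",
		GrantMethod: oauthv1.GrantHandlerAuto, // always approve grant requests
		RedirectURIs: []string{
			"https://demo.example.com/oauth/callback",
		},
		AccessTokenMaxAgeSeconds: &maxAge, // 0 would mean "no expiration"
		// Allow only the exact scope "user:info"; any other requested scope
		// is denied because no restriction matches it.
		ScopeRestrictions: []oauthv1.ScopeRestriction{
			{ExactValues: []string{"user:info"}},
		},
	}

	fmt.Println(client.Name, *client.AccessTokenMaxAgeSeconds)
}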
+// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message OAuthRedirectReference { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The reference to an redirect object in the current namespace. + optional RedirectReference reference = 2; +} + +// RedirectReference specifies the target in the current namespace that resolves into redirect URIs. Only the 'Route' kind is currently allowed. +message RedirectReference { + // The group of the target that is being referred to. + optional string group = 1; + + // The kind of the target that is being referred to. Currently, only 'Route' is allowed. + optional string kind = 2; + + // The name of the target that is being referred to. e.g. name of the Route. + optional string name = 3; +} + +// ScopeRestriction describe one restriction on scopes. Exactly one option must be non-nil. +message ScopeRestriction { + // ExactValues means the scope has to match a particular set of strings exactly + repeated string literals = 1; + + // ClusterRole describes a set of restrictions for cluster role scoping. + optional ClusterRoleScopeRestriction clusterRole = 2; +} + +// UserOAuthAccessToken is a virtual resource to mirror OAuthAccessTokens to +// the user the access token was issued for +// +openshift:compatibility-gen:level=1 +message UserOAuthAccessToken { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // ClientName references the client that created this token. + optional string clientName = 2; + + // ExpiresIn is the seconds from CreationTime before this token expires. + optional int64 expiresIn = 3; + + // Scopes is an array of the requested scopes. + repeated string scopes = 4; + + // RedirectURI is the redirection associated with the token. + optional string redirectURI = 5; + + // UserName is the user name associated with this token + optional string userName = 6; + + // UserUID is the unique UID associated with this token + optional string userUID = 7; + + // AuthorizeToken contains the token that authorized this token + optional string authorizeToken = 8; + + // RefreshToken is the value by which this token can be renewed. Can be blank. + optional string refreshToken = 9; + + // InactivityTimeoutSeconds is the value in seconds, from the + // CreationTimestamp, after which this token can no longer be used. + // The value is automatically incremented when the token is used. + optional int32 inactivityTimeoutSeconds = 10; +} + +// UserOAuthAccessTokenList is a collection of access tokens issued on behalf of +// the requesting user +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message UserOAuthAccessTokenList { + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated UserOAuthAccessToken items = 2; +} + diff --git a/vendor/github.com/openshift/api/oauth/v1/legacy.go b/vendor/github.com/openshift/api/oauth/v1/legacy.go new file mode 100644 index 000000000..65b57d243 --- /dev/null +++ b/vendor/github.com/openshift/api/oauth/v1/legacy.go @@ -0,0 +1,30 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme, extensionsv1beta1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &OAuthAccessToken{}, + &OAuthAccessTokenList{}, + &OAuthAuthorizeToken{}, + &OAuthAuthorizeTokenList{}, + &OAuthClient{}, + &OAuthClientList{}, + &OAuthClientAuthorization{}, + &OAuthClientAuthorizationList{}, + &OAuthRedirectReference{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) + return nil +} diff --git a/vendor/github.com/openshift/api/oauth/v1/register.go b/vendor/github.com/openshift/api/oauth/v1/register.go new file mode 100644 index 000000000..9992dffea --- /dev/null +++ b/vendor/github.com/openshift/api/oauth/v1/register.go @@ -0,0 +1,47 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "oauth.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &OAuthAccessToken{}, + &OAuthAccessTokenList{}, + &OAuthAuthorizeToken{}, + &OAuthAuthorizeTokenList{}, + &OAuthClient{}, + &OAuthClientList{}, + &OAuthClientAuthorization{}, + &OAuthClientAuthorizationList{}, + &OAuthRedirectReference{}, + &UserOAuthAccessToken{}, + &UserOAuthAccessTokenList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/oauth/v1/types.go b/vendor/github.com/openshift/api/oauth/v1/types.go new file mode 100644 index 000000000..026c527f5 --- /dev/null +++ b/vendor/github.com/openshift/api/oauth/v1/types.go @@ -0,0 +1,341 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OAuthAccessToken describes an OAuth access token. 
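register.go above exposes Install as the supported entry point for adding these kinds to a runtime.Scheme (SchemeGroupVersion and AddToScheme are kept only for older generators). A minimal usage sketch:

package main

import (
	"fmt"

	oauthv1 "github.com/openshift/api/oauth/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// Install wires every oauth.openshift.io/v1 kind into the scheme.
	if err := oauthv1.Install(scheme); err != nil {
		panic(err)
	}
	gvk := oauthv1.GroupVersion.WithKind("OAuthClient")
	fmt.Println(scheme.Recognizes(gvk)) // true
}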
+// The name of a token must be prefixed with a `sha256~` string, must not contain "/" or "%" characters and must be at +// least 32 characters long. +// +// The name of the token is constructed from the actual token by sha256-hashing it and using URL-safe unpadded +// base64-encoding (as described in RFC4648) on the hashed result. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OAuthAccessToken struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // ClientName references the client that created this token. + ClientName string `json:"clientName,omitempty" protobuf:"bytes,2,opt,name=clientName"` + + // ExpiresIn is the seconds from CreationTime before this token expires. + ExpiresIn int64 `json:"expiresIn,omitempty" protobuf:"varint,3,opt,name=expiresIn"` + + // Scopes is an array of the requested scopes. + Scopes []string `json:"scopes,omitempty" protobuf:"bytes,4,rep,name=scopes"` + + // RedirectURI is the redirection associated with the token. + RedirectURI string `json:"redirectURI,omitempty" protobuf:"bytes,5,opt,name=redirectURI"` + + // UserName is the user name associated with this token + UserName string `json:"userName,omitempty" protobuf:"bytes,6,opt,name=userName"` + + // UserUID is the unique UID associated with this token + UserUID string `json:"userUID,omitempty" protobuf:"bytes,7,opt,name=userUID"` + + // AuthorizeToken contains the token that authorized this token + AuthorizeToken string `json:"authorizeToken,omitempty" protobuf:"bytes,8,opt,name=authorizeToken"` + + // RefreshToken is the value by which this token can be renewed. Can be blank. + RefreshToken string `json:"refreshToken,omitempty" protobuf:"bytes,9,opt,name=refreshToken"` + + // InactivityTimeoutSeconds is the value in seconds, from the + // CreationTimestamp, after which this token can no longer be used. + // The value is automatically incremented when the token is used. + InactivityTimeoutSeconds int32 `json:"inactivityTimeoutSeconds,omitempty" protobuf:"varint,10,opt,name=inactivityTimeoutSeconds"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OAuthAuthorizeToken describes an OAuth authorization token +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OAuthAuthorizeToken struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // ClientName references the client that created this token. + ClientName string `json:"clientName,omitempty" protobuf:"bytes,2,opt,name=clientName"` + + // ExpiresIn is the seconds from CreationTime before this token expires. + ExpiresIn int64 `json:"expiresIn,omitempty" protobuf:"varint,3,opt,name=expiresIn"` + + // Scopes is an array of the requested scopes. 
+ Scopes []string `json:"scopes,omitempty" protobuf:"bytes,4,rep,name=scopes"` + + // RedirectURI is the redirection associated with the token. + RedirectURI string `json:"redirectURI,omitempty" protobuf:"bytes,5,opt,name=redirectURI"` + + // State data from request + State string `json:"state,omitempty" protobuf:"bytes,6,opt,name=state"` + + // UserName is the user name associated with this token + UserName string `json:"userName,omitempty" protobuf:"bytes,7,opt,name=userName"` + + // UserUID is the unique UID associated with this token. UserUID and UserName must both match + // for this token to be valid. + UserUID string `json:"userUID,omitempty" protobuf:"bytes,8,opt,name=userUID"` + + // CodeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636 + CodeChallenge string `json:"codeChallenge,omitempty" protobuf:"bytes,9,opt,name=codeChallenge"` + + // CodeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636 + CodeChallengeMethod string `json:"codeChallengeMethod,omitempty" protobuf:"bytes,10,opt,name=codeChallengeMethod"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OAuthClient describes an OAuth client +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OAuthClient struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Secret is the unique secret associated with a client + Secret string `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"` + + // AdditionalSecrets holds other secrets that may be used to identify the client. This is useful for rotation + // and for service account token validation + AdditionalSecrets []string `json:"additionalSecrets,omitempty" protobuf:"bytes,3,rep,name=additionalSecrets"` + + // RespondWithChallenges indicates whether the client wants authentication needed responses made in the form of challenges instead of redirects + RespondWithChallenges bool `json:"respondWithChallenges,omitempty" protobuf:"varint,4,opt,name=respondWithChallenges"` + + // RedirectURIs is the valid redirection URIs associated with a client + // +patchStrategy=merge + RedirectURIs []string `json:"redirectURIs,omitempty" patchStrategy:"merge" protobuf:"bytes,5,rep,name=redirectURIs"` + + // GrantMethod is a required field which determines how to handle grants for this client. + // Valid grant handling methods are: + // - auto: always approves grant requests, useful for trusted clients + // - prompt: prompts the end user for approval of grant requests, useful for third-party clients + GrantMethod GrantHandlerType `json:"grantMethod,omitempty" protobuf:"bytes,6,opt,name=grantMethod,casttype=GrantHandlerType"` + + // ScopeRestrictions describes which scopes this client can request. Each requested scope + // is checked against each restriction. If any restriction matches, then the scope is allowed. + // If no restriction matches, then the scope is denied. 
+ ScopeRestrictions []ScopeRestriction `json:"scopeRestrictions,omitempty" protobuf:"bytes,7,rep,name=scopeRestrictions"` + + // AccessTokenMaxAgeSeconds overrides the default access token max age for tokens granted to this client. + // 0 means no expiration. + AccessTokenMaxAgeSeconds *int32 `json:"accessTokenMaxAgeSeconds,omitempty" protobuf:"varint,8,opt,name=accessTokenMaxAgeSeconds"` + + // AccessTokenInactivityTimeoutSeconds overrides the default token + // inactivity timeout for tokens granted to this client. + // The value represents the maximum amount of time that can occur between + // consecutive uses of the token. Tokens become invalid if they are not + // used within this temporal window. The user will need to acquire a new + // token to regain access once a token times out. + // This value needs to be set only if the default set in configuration is + // not appropriate for this client. Valid values are: + // - 0: Tokens for this client never time out + // - X: Tokens time out if there is no activity for X seconds + // The current minimum allowed value for X is 300 (5 minutes) + // + // WARNING: existing tokens' timeout will not be affected (lowered) by changing this value + AccessTokenInactivityTimeoutSeconds *int32 `json:"accessTokenInactivityTimeoutSeconds,omitempty" protobuf:"varint,9,opt,name=accessTokenInactivityTimeoutSeconds"` +} + +type GrantHandlerType string + +const ( + // GrantHandlerAuto auto-approves client authorization grant requests + GrantHandlerAuto GrantHandlerType = "auto" + // GrantHandlerPrompt prompts the user to approve new client authorization grant requests + GrantHandlerPrompt GrantHandlerType = "prompt" + // GrantHandlerDeny auto-denies client authorization grant requests + GrantHandlerDeny GrantHandlerType = "deny" +) + +// ScopeRestriction describe one restriction on scopes. Exactly one option must be non-nil. +type ScopeRestriction struct { + // ExactValues means the scope has to match a particular set of strings exactly + ExactValues []string `json:"literals,omitempty" protobuf:"bytes,1,rep,name=literals"` + + // ClusterRole describes a set of restrictions for cluster role scoping. + ClusterRole *ClusterRoleScopeRestriction `json:"clusterRole,omitempty" protobuf:"bytes,2,opt,name=clusterRole"` +} + +// ClusterRoleScopeRestriction describes restrictions on cluster role scopes +type ClusterRoleScopeRestriction struct { + // RoleNames is the list of cluster roles that can referenced. * means anything + RoleNames []string `json:"roleNames" protobuf:"bytes,1,rep,name=roleNames"` + // Namespaces is the list of namespaces that can be referenced. * means any of them (including *) + Namespaces []string `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"` + // AllowEscalation indicates whether you can request roles and their escalating resources + AllowEscalation bool `json:"allowEscalation" protobuf:"varint,3,opt,name=allowEscalation"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OAuthClientAuthorization describes an authorization created by an OAuth client +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OAuthClientAuthorization struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // ClientName references the client that created this authorization + ClientName string `json:"clientName,omitempty" protobuf:"bytes,2,opt,name=clientName"` + + // UserName is the user name that authorized this client + UserName string `json:"userName,omitempty" protobuf:"bytes,3,opt,name=userName"` + + // UserUID is the unique UID associated with this authorization. UserUID and UserName + // must both match for this authorization to be valid. + UserUID string `json:"userUID,omitempty" protobuf:"bytes,4,opt,name=userUID"` + + // Scopes is an array of the granted scopes. + Scopes []string `json:"scopes,omitempty" protobuf:"bytes,5,rep,name=scopes"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OAuthAccessTokenList is a collection of OAuth access tokens +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OAuthAccessTokenList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of OAuth access tokens + Items []OAuthAccessToken `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OAuthAuthorizeTokenList is a collection of OAuth authorization tokens +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OAuthAuthorizeTokenList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of OAuth authorization tokens + Items []OAuthAuthorizeToken `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OAuthClientList is a collection of OAuth clients +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OAuthClientList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of OAuth clients + Items []OAuthClient `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OAuthClientAuthorizationList is a collection of OAuth client authorizations +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=1 +type OAuthClientAuthorizationList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of OAuth client authorizations + Items []OAuthClientAuthorization `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OAuthRedirectReference is a reference to an OAuth redirect object. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type OAuthRedirectReference struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The reference to an redirect object in the current namespace. + Reference RedirectReference `json:"reference,omitempty" protobuf:"bytes,2,opt,name=reference"` +} + +// RedirectReference specifies the target in the current namespace that resolves into redirect URIs. Only the 'Route' kind is currently allowed. +type RedirectReference struct { + // The group of the target that is being referred to. + Group string `json:"group" protobuf:"bytes,1,opt,name=group"` + + // The kind of the target that is being referred to. Currently, only 'Route' is allowed. + Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` + + // The name of the target that is being referred to. e.g. name of the Route. + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// UserOAuthAccessToken is a virtual resource to mirror OAuthAccessTokens to +// the user the access token was issued for +// +openshift:compatibility-gen:level=1 +type UserOAuthAccessToken OAuthAccessToken + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// UserOAuthAccessTokenList is a collection of access tokens issued on behalf of +// the requesting user +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type UserOAuthAccessTokenList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Items []UserOAuthAccessToken `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/github.com/openshift/api/oauth/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/oauth/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..f1af9dc5f --- /dev/null +++ b/vendor/github.com/openshift/api/oauth/v1/zz_generated.deepcopy.go @@ -0,0 +1,447 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. 
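The deepcopy functions that follow re-allocate every slice and pointer field, so a copy never aliases its source. A small demonstration with a placeholder client:

package main

import (
	"fmt"

	oauthv1 "github.com/openshift/api/oauth/v1"
)

func main() {
	orig := &oauthv1.OAuthClient{
		RedirectURIs: []string{"https://a.example.com/cb"}, // placeholder URI
	}

	cp := orig.DeepCopy()
	cp.RedirectURIs[0] = "https://b.example.com/cb"

	// DeepCopyInto clones the RedirectURIs slice, so the original is untouched.
	fmt.Println(orig.RedirectURIs[0]) // https://a.example.com/cb
}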
+ +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRoleScopeRestriction) DeepCopyInto(out *ClusterRoleScopeRestriction) { + *out = *in + if in.RoleNames != nil { + in, out := &in.RoleNames, &out.RoleNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleScopeRestriction. +func (in *ClusterRoleScopeRestriction) DeepCopy() *ClusterRoleScopeRestriction { + if in == nil { + return nil + } + out := new(ClusterRoleScopeRestriction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthAccessToken) DeepCopyInto(out *OAuthAccessToken) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthAccessToken. +func (in *OAuthAccessToken) DeepCopy() *OAuthAccessToken { + if in == nil { + return nil + } + out := new(OAuthAccessToken) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthAccessToken) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthAccessTokenList) DeepCopyInto(out *OAuthAccessTokenList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OAuthAccessToken, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthAccessTokenList. +func (in *OAuthAccessTokenList) DeepCopy() *OAuthAccessTokenList { + if in == nil { + return nil + } + out := new(OAuthAccessTokenList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthAccessTokenList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthAuthorizeToken) DeepCopyInto(out *OAuthAuthorizeToken) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthAuthorizeToken. 
+func (in *OAuthAuthorizeToken) DeepCopy() *OAuthAuthorizeToken { + if in == nil { + return nil + } + out := new(OAuthAuthorizeToken) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthAuthorizeToken) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthAuthorizeTokenList) DeepCopyInto(out *OAuthAuthorizeTokenList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OAuthAuthorizeToken, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthAuthorizeTokenList. +func (in *OAuthAuthorizeTokenList) DeepCopy() *OAuthAuthorizeTokenList { + if in == nil { + return nil + } + out := new(OAuthAuthorizeTokenList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthAuthorizeTokenList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthClient) DeepCopyInto(out *OAuthClient) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.AdditionalSecrets != nil { + in, out := &in.AdditionalSecrets, &out.AdditionalSecrets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.RedirectURIs != nil { + in, out := &in.RedirectURIs, &out.RedirectURIs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ScopeRestrictions != nil { + in, out := &in.ScopeRestrictions, &out.ScopeRestrictions + *out = make([]ScopeRestriction, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AccessTokenMaxAgeSeconds != nil { + in, out := &in.AccessTokenMaxAgeSeconds, &out.AccessTokenMaxAgeSeconds + *out = new(int32) + **out = **in + } + if in.AccessTokenInactivityTimeoutSeconds != nil { + in, out := &in.AccessTokenInactivityTimeoutSeconds, &out.AccessTokenInactivityTimeoutSeconds + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthClient. +func (in *OAuthClient) DeepCopy() *OAuthClient { + if in == nil { + return nil + } + out := new(OAuthClient) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthClient) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OAuthClientAuthorization) DeepCopyInto(out *OAuthClientAuthorization) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthClientAuthorization. +func (in *OAuthClientAuthorization) DeepCopy() *OAuthClientAuthorization { + if in == nil { + return nil + } + out := new(OAuthClientAuthorization) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthClientAuthorization) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthClientAuthorizationList) DeepCopyInto(out *OAuthClientAuthorizationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OAuthClientAuthorization, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthClientAuthorizationList. +func (in *OAuthClientAuthorizationList) DeepCopy() *OAuthClientAuthorizationList { + if in == nil { + return nil + } + out := new(OAuthClientAuthorizationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthClientAuthorizationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthClientList) DeepCopyInto(out *OAuthClientList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OAuthClient, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthClientList. +func (in *OAuthClientList) DeepCopy() *OAuthClientList { + if in == nil { + return nil + } + out := new(OAuthClientList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthClientList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthRedirectReference) DeepCopyInto(out *OAuthRedirectReference) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Reference = in.Reference + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthRedirectReference. 
+func (in *OAuthRedirectReference) DeepCopy() *OAuthRedirectReference { + if in == nil { + return nil + } + out := new(OAuthRedirectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OAuthRedirectReference) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RedirectReference) DeepCopyInto(out *RedirectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RedirectReference. +func (in *RedirectReference) DeepCopy() *RedirectReference { + if in == nil { + return nil + } + out := new(RedirectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScopeRestriction) DeepCopyInto(out *ScopeRestriction) { + *out = *in + if in.ExactValues != nil { + in, out := &in.ExactValues, &out.ExactValues + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ClusterRole != nil { + in, out := &in.ClusterRole, &out.ClusterRole + *out = new(ClusterRoleScopeRestriction) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeRestriction. +func (in *ScopeRestriction) DeepCopy() *ScopeRestriction { + if in == nil { + return nil + } + out := new(ScopeRestriction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserOAuthAccessToken) DeepCopyInto(out *UserOAuthAccessToken) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserOAuthAccessToken. +func (in *UserOAuthAccessToken) DeepCopy() *UserOAuthAccessToken { + if in == nil { + return nil + } + out := new(UserOAuthAccessToken) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UserOAuthAccessToken) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserOAuthAccessTokenList) DeepCopyInto(out *UserOAuthAccessTokenList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]UserOAuthAccessToken, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserOAuthAccessTokenList. +func (in *UserOAuthAccessTokenList) DeepCopy() *UserOAuthAccessTokenList { + if in == nil { + return nil + } + out := new(UserOAuthAccessTokenList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
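// Illustrative sketch (not part of the vendored patch): the generated DeepCopy
// methods above return a copy that shares no memory with the receiver, so
// nested slices such as Scopes get a fresh backing array. The field values
// below ("console", "user:full") are made-up examples.
package main

import (
	"fmt"

	oauthv1 "github.com/openshift/api/oauth/v1"
)

func main() {
	orig := &oauthv1.OAuthAccessToken{
		ClientName: "console",
		Scopes:     []string{"user:full"},
	}

	cp := orig.DeepCopy()
	cp.Scopes[0] = "user:info" // mutates only the copy's slice

	fmt.Println(orig.Scopes[0]) // prints "user:full": the original is untouched
}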
+func (in *UserOAuthAccessTokenList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
diff --git a/vendor/github.com/openshift/api/oauth/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/oauth/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 000000000..f62b715c0
--- /dev/null
+++ b/vendor/github.com/openshift/api/oauth/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,171 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_ClusterRoleScopeRestriction = map[string]string{
+	"":                "ClusterRoleScopeRestriction describes restrictions on cluster role scopes",
+	"roleNames":       "RoleNames is the list of cluster roles that can be referenced. * means anything",
+	"namespaces":      "Namespaces is the list of namespaces that can be referenced. * means any of them (including *)",
+	"allowEscalation": "AllowEscalation indicates whether you can request roles and their escalating resources",
+}
+
+func (ClusterRoleScopeRestriction) SwaggerDoc() map[string]string {
+	return map_ClusterRoleScopeRestriction
+}
+
+var map_OAuthAccessToken = map[string]string{
+	"":                         "OAuthAccessToken describes an OAuth access token. The name of a token must be prefixed with a `sha256~` string, must not contain \"/\" or \"%\" characters and must be at least 32 characters long.\n\nThe name of the token is constructed from the actual token by sha256-hashing it and using URL-safe unpadded base64-encoding (as described in RFC4648) on the hashed result.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata":                 "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"clientName":               "ClientName references the client that created this token.",
+	"expiresIn":                "ExpiresIn is the seconds from CreationTime before this token expires.",
+	"scopes":                   "Scopes is an array of the requested scopes.",
+	"redirectURI":              "RedirectURI is the redirection associated with the token.",
+	"userName":                 "UserName is the user name associated with this token",
+	"userUID":                  "UserUID is the unique UID associated with this token",
+	"authorizeToken":           "AuthorizeToken contains the token that authorized this token",
+	"refreshToken":             "RefreshToken is the value by which this token can be renewed. Can be blank.",
+	"inactivityTimeoutSeconds": "InactivityTimeoutSeconds is the value in seconds, from the CreationTimestamp, after which this token can no longer be used.
The value is automatically incremented when the token is used.", +} + +func (OAuthAccessToken) SwaggerDoc() map[string]string { + return map_OAuthAccessToken +} + +var map_OAuthAccessTokenList = map[string]string{ + "": "OAuthAccessTokenList is a collection of OAuth access tokens\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is the list of OAuth access tokens", +} + +func (OAuthAccessTokenList) SwaggerDoc() map[string]string { + return map_OAuthAccessTokenList +} + +var map_OAuthAuthorizeToken = map[string]string{ + "": "OAuthAuthorizeToken describes an OAuth authorization token\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "clientName": "ClientName references the client that created this token.", + "expiresIn": "ExpiresIn is the seconds from CreationTime before this token expires.", + "scopes": "Scopes is an array of the requested scopes.", + "redirectURI": "RedirectURI is the redirection associated with the token.", + "state": "State data from request", + "userName": "UserName is the user name associated with this token", + "userUID": "UserUID is the unique UID associated with this token. UserUID and UserName must both match for this token to be valid.", + "codeChallenge": "CodeChallenge is the optional code_challenge associated with this authorization code, as described in rfc7636", + "codeChallengeMethod": "CodeChallengeMethod is the optional code_challenge_method associated with this authorization code, as described in rfc7636", +} + +func (OAuthAuthorizeToken) SwaggerDoc() map[string]string { + return map_OAuthAuthorizeToken +} + +var map_OAuthAuthorizeTokenList = map[string]string{ + "": "OAuthAuthorizeTokenList is a collection of OAuth authorization tokens\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is the list of OAuth authorization tokens", +} + +func (OAuthAuthorizeTokenList) SwaggerDoc() map[string]string { + return map_OAuthAuthorizeTokenList +} + +var map_OAuthClient = map[string]string{ + "": "OAuthClient describes an OAuth client\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "secret": "Secret is the unique secret associated with a client", + "additionalSecrets": "AdditionalSecrets holds other secrets that may be used to identify the client. 
This is useful for rotation and for service account token validation", + "respondWithChallenges": "RespondWithChallenges indicates whether the client wants authentication needed responses made in the form of challenges instead of redirects", + "redirectURIs": "RedirectURIs is the valid redirection URIs associated with a client", + "grantMethod": "GrantMethod is a required field which determines how to handle grants for this client. Valid grant handling methods are:\n - auto: always approves grant requests, useful for trusted clients\n - prompt: prompts the end user for approval of grant requests, useful for third-party clients", + "scopeRestrictions": "ScopeRestrictions describes which scopes this client can request. Each requested scope is checked against each restriction. If any restriction matches, then the scope is allowed. If no restriction matches, then the scope is denied.", + "accessTokenMaxAgeSeconds": "AccessTokenMaxAgeSeconds overrides the default access token max age for tokens granted to this client. 0 means no expiration.", + "accessTokenInactivityTimeoutSeconds": "AccessTokenInactivityTimeoutSeconds overrides the default token inactivity timeout for tokens granted to this client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. This value needs to be set only if the default set in configuration is not appropriate for this client. Valid values are: - 0: Tokens for this client never time out - X: Tokens time out if there is no activity for X seconds The current minimum allowed value for X is 300 (5 minutes)\n\nWARNING: existing tokens' timeout will not be affected (lowered) by changing this value", +} + +func (OAuthClient) SwaggerDoc() map[string]string { + return map_OAuthClient +} + +var map_OAuthClientAuthorization = map[string]string{ + "": "OAuthClientAuthorization describes an authorization created by an OAuth client\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "clientName": "ClientName references the client that created this authorization", + "userName": "UserName is the user name that authorized this client", + "userUID": "UserUID is the unique UID associated with this authorization. UserUID and UserName must both match for this authorization to be valid.", + "scopes": "Scopes is an array of the granted scopes.", +} + +func (OAuthClientAuthorization) SwaggerDoc() map[string]string { + return map_OAuthClientAuthorization +} + +var map_OAuthClientAuthorizationList = map[string]string{ + "": "OAuthClientAuthorizationList is a collection of OAuth client authorizations\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"items":    "Items is the list of OAuth client authorizations",
+}
+
+func (OAuthClientAuthorizationList) SwaggerDoc() map[string]string {
+	return map_OAuthClientAuthorizationList
+}
+
+var map_OAuthClientList = map[string]string{
+	"":         "OAuthClientList is a collection of OAuth clients\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"items":    "Items is the list of OAuth clients",
+}
+
+func (OAuthClientList) SwaggerDoc() map[string]string {
+	return map_OAuthClientList
+}
+
+var map_OAuthRedirectReference = map[string]string{
+	"":          "OAuthRedirectReference is a reference to an OAuth redirect object.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata":  "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"reference": "The reference to a redirect object in the current namespace.",
+}
+
+func (OAuthRedirectReference) SwaggerDoc() map[string]string {
+	return map_OAuthRedirectReference
+}
+
+var map_RedirectReference = map[string]string{
+	"":      "RedirectReference specifies the target in the current namespace that resolves into redirect URIs. Only the 'Route' kind is currently allowed.",
+	"group": "The group of the target that is being referred to.",
+	"kind":  "The kind of the target that is being referred to. Currently, only 'Route' is allowed.",
+	"name":  "The name of the target that is being referred to. e.g. name of the Route.",
+}
+
+func (RedirectReference) SwaggerDoc() map[string]string {
+	return map_RedirectReference
+}
+
+var map_ScopeRestriction = map[string]string{
+	"":            "ScopeRestriction describes one restriction on scopes. Exactly one option must be non-nil.",
+	"literals":    "ExactValues means the scope has to match a particular set of strings exactly",
+	"clusterRole": "ClusterRole describes a set of restrictions for cluster role scoping.",
+}
+
+func (ScopeRestriction) SwaggerDoc() map[string]string {
+	return map_ScopeRestriction
+}
+
+var map_UserOAuthAccessTokenList = map[string]string{
+	"":         "UserOAuthAccessTokenList is a collection of access tokens issued on behalf of the requesting user\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (UserOAuthAccessTokenList) SwaggerDoc() map[string]string { + return map_UserOAuthAccessTokenList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/.codegen.yaml b/vendor/github.com/openshift/api/openshiftcontrolplane/.codegen.yaml new file mode 100644 index 000000000..ffa2c8d9b --- /dev/null +++ b/vendor/github.com/openshift/api/openshiftcontrolplane/.codegen.yaml @@ -0,0 +1,2 @@ +swaggerdocs: + commentPolicy: Warn diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/install.go b/vendor/github.com/openshift/api/openshiftcontrolplane/install.go new file mode 100644 index 000000000..5c745fd7f --- /dev/null +++ b/vendor/github.com/openshift/api/openshiftcontrolplane/install.go @@ -0,0 +1,26 @@ +package openshiftcontrolplane + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + openshiftcontrolplanev1 "github.com/openshift/api/openshiftcontrolplane/v1" +) + +const ( + GroupName = "openshiftcontrolplane.config.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(openshiftcontrolplanev1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/doc.go b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/doc.go new file mode 100644 index 000000000..4528e3c4a --- /dev/null +++ b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/doc.go @@ -0,0 +1,7 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=openshiftcontrolplane.config.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/register.go b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/register.go new file mode 100644 index 000000000..3d0bb20f2 --- /dev/null +++ b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/register.go @@ -0,0 +1,40 @@ +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + osinv1 "github.com/openshift/api/osin/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "openshiftcontrolplane.config.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, osinv1.Install, configv1.Install) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. 
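// Illustrative sketch (not part of the vendored patch): Install above is just
// schemeBuilder.AddToScheme, so registering this group, together with the
// osin/v1 and config/v1 types its scheme builder pulls in, is a single call.
package main

import (
	"fmt"

	openshiftcontrolplanev1 "github.com/openshift/api/openshiftcontrolplane/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// Install fails only if one of the bundled scheme builders cannot
	// register its types into the scheme.
	if err := openshiftcontrolplanev1.Install(scheme); err != nil {
		panic(err)
	}
	fmt.Println(scheme.IsVersionRegistered(openshiftcontrolplanev1.GroupVersion)) // true
}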
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &OpenShiftAPIServerConfig{}, + &OpenShiftControllerManagerConfig{}, + &BuildDefaultsConfig{}, + &BuildOverridesConfig{}, + ) + return nil +} diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go new file mode 100644 index 000000000..bb0f25024 --- /dev/null +++ b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/types.go @@ -0,0 +1,429 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + buildv1 "github.com/openshift/api/build/v1" + configv1 "github.com/openshift/api/config/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type OpenShiftAPIServerConfig struct { + metav1.TypeMeta `json:",inline"` + + // provides the standard apiserver configuration + configv1.GenericAPIServerConfig `json:",inline"` + + // aggregatorConfig contains information about how to verify the aggregator front proxy + AggregatorConfig FrontProxyConfig `json:"aggregatorConfig"` + + // imagePolicyConfig feeds the image policy admission plugin + ImagePolicyConfig ImagePolicyConfig `json:"imagePolicyConfig"` + + // projectConfig feeds an admission plugin + ProjectConfig ProjectConfig `json:"projectConfig"` + + // routingConfig holds information about routing and route generation + RoutingConfig RoutingConfig `json:"routingConfig"` + + // serviceAccountOAuthGrantMethod is used for determining client authorization for service account oauth client. + // It must be either: deny, prompt, or "" + ServiceAccountOAuthGrantMethod GrantHandlerType `json:"serviceAccountOAuthGrantMethod"` + + // jenkinsPipelineConfig holds information about the default Jenkins template + // used for JenkinsPipeline build strategy. + // TODO this needs to become a normal plugin config + JenkinsPipelineConfig JenkinsPipelineConfig `json:"jenkinsPipelineConfig"` + + // cloudProviderFile points to the cloud config file + // TODO this needs to become a normal plugin config + CloudProviderFile string `json:"cloudProviderFile"` + + // TODO this needs to be removed. + APIServerArguments map[string][]string `json:"apiServerArguments"` + + // apiServers holds information about enabled/disabled API servers + APIServers APIServers `json:"apiServers"` +} + +type APIServers struct { + // perGroupOptions is a list of enabled/disabled API servers in addition to the defaults + PerGroupOptions []PerGroupOptions `json:"perGroupOptions"` +} + +type PerGroupOptions struct { + // name is an API server name (see OpenShiftAPIserverName + // typed constants for a complete list of available API servers). + Name OpenShiftAPIserverName `json:"name"` + + // enabledVersions is a list of versions that must be enabled in addition to the defaults. + // Must not collide with the list of disabled versions + EnabledVersions []string `json:"enabledVersions"` + + // disabledVersions is a list of versions that must be disabled in addition to the defaults. 
+ // Must not collide with the list of enabled versions + DisabledVersions []string `json:"disabledVersions"` +} + +type OpenShiftAPIserverName string + +const ( + OpenShiftAppsAPIserver OpenShiftAPIserverName = "apps.openshift.io" + OpenShiftAuthorizationAPIserver OpenShiftAPIserverName = "authorization.openshift.io" + OpenShiftBuildAPIserver OpenShiftAPIserverName = "build.openshift.io" + OpenShiftImageAPIserver OpenShiftAPIserverName = "image.openshift.io" + OpenShiftProjectAPIserver OpenShiftAPIserverName = "project.openshift.io" + OpenShiftQuotaAPIserver OpenShiftAPIserverName = "quota.openshift.io" + OpenShiftRouteAPIserver OpenShiftAPIserverName = "route.openshift.io" + OpenShiftSecurityAPIserver OpenShiftAPIserverName = "security.openshift.io" + OpenShiftTemplateAPIserver OpenShiftAPIserverName = "template.openshift.io" +) + +type FrontProxyConfig struct { + // clientCA is a path to the CA bundle to use to verify the common name of the front proxy's client cert + ClientCA string `json:"clientCA"` + // allowedNames is an optional list of common names to require a match from. + AllowedNames []string `json:"allowedNames"` + + // usernameHeaders is the set of headers to check for the username + UsernameHeaders []string `json:"usernameHeaders"` + // groupHeaders is the set of headers to check for groups + GroupHeaders []string `json:"groupHeaders"` + // extraHeaderPrefixes is the set of header prefixes to check for user extra + ExtraHeaderPrefixes []string `json:"extraHeaderPrefixes"` +} + +type GrantHandlerType string + +const ( + // GrantHandlerAuto auto-approves client authorization grant requests + GrantHandlerAuto GrantHandlerType = "auto" + // GrantHandlerPrompt prompts the user to approve new client authorization grant requests + GrantHandlerPrompt GrantHandlerType = "prompt" + // GrantHandlerDeny auto-denies client authorization grant requests + GrantHandlerDeny GrantHandlerType = "deny" +) + +// RoutingConfig holds the necessary configuration options for routing to subdomains +type RoutingConfig struct { + // subdomain is the suffix appended to $service.$namespace. to form the default route hostname + // DEPRECATED: This field is being replaced by routers setting their own defaults. This is the + // "default" route. + Subdomain string `json:"subdomain"` +} + +type ImagePolicyConfig struct { + // maxImagesBulkImportedPerRepository controls the number of images that are imported when a user + // does a bulk import of a container repository. This number is set low to prevent users from + // importing large numbers of images accidentally. Set -1 for no limit. + MaxImagesBulkImportedPerRepository int `json:"maxImagesBulkImportedPerRepository"` + // allowedRegistriesForImport limits the container image registries that normal users may import + // images from. Set this list to the registries that you trust to contain valid Docker + // images and that you want applications to be able to import from. Users with + // permission to create Images or ImageStreamMappings via the API are not affected by + // this policy - typically only administrators or system integrations will have those + // permissions. + AllowedRegistriesForImport AllowedRegistries `json:"allowedRegistriesForImport"` + + // internalRegistryHostname sets the hostname for the default internal image + // registry. The value must be in "hostname[:port]" format. + // For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY + // environment variable but this setting overrides the environment variable. 
+	InternalRegistryHostname string `json:"internalRegistryHostname"`
+	// externalRegistryHostnames provides the hostnames for the default external image
+	// registry. The external hostname should be set only when the image registry
+	// is exposed externally. The first value is used in 'publicDockerImageRepository'
+	// field in ImageStreams. The value must be in "hostname[:port]" format.
+	ExternalRegistryHostnames []string `json:"externalRegistryHostnames"`
+
+	// additionalTrustedCA is a path to a pem bundle file containing additional CAs that
+	// should be trusted during imagestream import.
+	AdditionalTrustedCA string `json:"additionalTrustedCA"`
+}
+
+// AllowedRegistries represents a list of registries allowed for the image import.
+type AllowedRegistries []RegistryLocation
+
+// RegistryLocation contains a location of the registry specified by the registry domain
+// name. The domain name might include wildcards, like '*' or '??'.
+type RegistryLocation struct {
+	// DomainName specifies a domain name for the registry
+	// In case the registry uses a non-standard (80 or 443) port, the port should be included
+	// in the domain name as well.
+	DomainName string `json:"domainName"`
+	// Insecure indicates whether the registry is secure (https) or insecure (http)
+	// By default (if not specified) the registry is assumed to be secure.
+	Insecure bool `json:"insecure,omitempty"`
+}
+
+type ProjectConfig struct {
+	// defaultNodeSelector holds default project node label selector
+	DefaultNodeSelector string `json:"defaultNodeSelector"`
+
+	// projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint
+	ProjectRequestMessage string `json:"projectRequestMessage"`
+
+	// projectRequestTemplate is the template to use for creating projects in response to projectrequest.
+	// It is in the format namespace/template and it is optional.
+	// If it is not specified, a default template is used.
+	ProjectRequestTemplate string `json:"projectRequestTemplate"`
+}
+
+// JenkinsPipelineConfig holds configuration for the Jenkins pipeline strategy
+type JenkinsPipelineConfig struct {
+	// autoProvisionEnabled determines whether a Jenkins server will be spawned from the provided
+	// template when the first build config in the project with type JenkinsPipeline
+	// is created. When not specified this option defaults to true.
+	AutoProvisionEnabled *bool `json:"autoProvisionEnabled"`
+	// templateNamespace contains the namespace name where the Jenkins template is stored
+	TemplateNamespace string `json:"templateNamespace"`
+	// templateName is the name of the default Jenkins template
+	TemplateName string `json:"templateName"`
+	// serviceName is the name of the Jenkins service OpenShift uses to detect
+	// whether a Jenkins pipeline handler has already been installed in a project.
+	// This value *must* match a service name in the provided template.
+	ServiceName string `json:"serviceName"`
+	// parameters specifies a set of optional parameters to the Jenkins template.
+	Parameters map[string]string `json:"parameters"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type OpenShiftControllerManagerConfig struct {
+	metav1.TypeMeta `json:",inline"`
+
+	KubeClientConfig configv1.KubeClientConfig `json:"kubeClientConfig"`
+
+	// servingInfo describes how to start serving
+	ServingInfo *configv1.HTTPServingInfo `json:"servingInfo"`
+
+	// leaderElection defines the configuration for electing a controller instance to make changes to
+	// the cluster. If unspecified, the ControllerTTL value is checked to determine whether the
+	// legacy direct etcd election code will be used.
+	LeaderElection configv1.LeaderElection `json:"leaderElection"`
+
+	// controllers is a list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller
+	// named 'foo', '-foo' disables the controller named 'foo'.
+	// Defaults to "*".
+	Controllers []string `json:"controllers"`
+
+	ResourceQuota      ResourceQuotaControllerConfig    `json:"resourceQuota"`
+	ServiceServingCert ServiceServingCert               `json:"serviceServingCert"`
+	Deployer           DeployerControllerConfig         `json:"deployer"`
+	Build              BuildControllerConfig            `json:"build"`
+	ServiceAccount     ServiceAccountControllerConfig   `json:"serviceAccount"`
+	DockerPullSecret   DockerPullSecretControllerConfig `json:"dockerPullSecret"`
+	Network            NetworkControllerConfig          `json:"network"`
+	Ingress            IngressControllerConfig          `json:"ingress"`
+	ImageImport        ImageImportControllerConfig      `json:"imageImport"`
+	SecurityAllocator  SecurityAllocator                `json:"securityAllocator"`
+
+	// featureGates are the set of extra OpenShift feature gates for openshift-controller-manager.
+	// These feature gates can be used to enable features that are tech preview or otherwise not available on
+	// OpenShift by default.
+	FeatureGates []string `json:"featureGates"`
+}
+
+type DeployerControllerConfig struct {
+	ImageTemplateFormat ImageConfig `json:"imageTemplateFormat"`
+}
+
+type BuildControllerConfig struct {
+	ImageTemplateFormat ImageConfig `json:"imageTemplateFormat"`
+
+	BuildDefaults  *BuildDefaultsConfig  `json:"buildDefaults"`
+	BuildOverrides *BuildOverridesConfig `json:"buildOverrides"`
+
+	// additionalTrustedCA is a path to a pem bundle file containing additional CAs that
+	// should be trusted for image pushes and pulls during builds.
+	AdditionalTrustedCA string `json:"additionalTrustedCA"`
+}
+
+type ResourceQuotaControllerConfig struct {
+	ConcurrentSyncs int32           `json:"concurrentSyncs"`
+	SyncPeriod      metav1.Duration `json:"syncPeriod"`
+	MinResyncPeriod metav1.Duration `json:"minResyncPeriod"`
+}
+
+type IngressControllerConfig struct {
+	// ingressIPNetworkCIDR controls the range to assign ingress ips from for services of type LoadBalancer on bare
+	// metal. If empty, ingress ips will not be assigned. It may contain a single CIDR that will be allocated from.
+	// For security reasons, you should ensure that this range does not overlap with the CIDRs reserved for external ips,
+	// nodes, pods, or services.
+	IngressIPNetworkCIDR string `json:"ingressIPNetworkCIDR"`
+}
+
+// MasterNetworkConfig to be passed to the compiled in network plugin
+type NetworkControllerConfig struct {
+	NetworkPluginName string `json:"networkPluginName"`
+	// clusterNetworks contains a list of cluster networks that defines the global overlay networks L3 space.
+	ClusterNetworks    []ClusterNetworkEntry `json:"clusterNetworks"`
+	ServiceNetworkCIDR string                `json:"serviceNetworkCIDR"`
+	VXLANPort          uint32                `json:"vxlanPort"`
+}
+
+type ServiceAccountControllerConfig struct {
+	// managedNames is a list of service account names that will be auto-created in every namespace.
+	// If no names are specified, the ServiceAccountsController will not be started.
+	ManagedNames []string `json:"managedNames"`
+}
+
+type DockerPullSecretControllerConfig struct {
+	// registryURLs is a list of urls that the docker pull secrets should be valid for.
+	RegistryURLs []string `json:"registryURLs"`
+
+	// internalRegistryHostname is the hostname for the default internal image
+	// registry. The value must be in "hostname[:port]" format. Docker pull secrets
+	// will be generated for this registry.
+	InternalRegistryHostname string `json:"internalRegistryHostname"`
+}
+
+type ImageImportControllerConfig struct {
+	// maxScheduledImageImportsPerMinute is the maximum number of image streams that will be imported in the background per minute.
+	// The default value is 60. Set to -1 for unlimited.
+	MaxScheduledImageImportsPerMinute int `json:"maxScheduledImageImportsPerMinute"`
+	// disableScheduledImport allows scheduled background import of images to be disabled.
+	DisableScheduledImport bool `json:"disableScheduledImport"`
+	// scheduledImageImportMinimumIntervalSeconds is the minimum number of seconds that can elapse between when image streams
+	// scheduled for background import are checked against the upstream repository. The default value is 15 minutes.
+	ScheduledImageImportMinimumIntervalSeconds int `json:"scheduledImageImportMinimumIntervalSeconds"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildDefaultsConfig controls the default information for Builds
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type BuildDefaultsConfig struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// gitHTTPProxy is the location of the HTTPProxy for Git source
+	GitHTTPProxy string `json:"gitHTTPProxy,omitempty"`
+
+	// gitHTTPSProxy is the location of the HTTPSProxy for Git source
+	GitHTTPSProxy string `json:"gitHTTPSProxy,omitempty"`
+
+	// gitNoProxy is the list of domains for which the proxy should not be used
+	GitNoProxy string `json:"gitNoProxy,omitempty"`
+
+	// env is a set of default environment variables that will be applied to the
+	// build if the specified variables do not exist on the build
+	Env []corev1.EnvVar `json:"env,omitempty"`
+
+	// sourceStrategyDefaults are default values that apply to builds using the
+	// source strategy.
+	SourceStrategyDefaults *SourceStrategyDefaultsConfig `json:"sourceStrategyDefaults,omitempty"`
+
+	// imageLabels is a list of labels that are applied to the resulting image.
+	// Users can override a default label by providing a label with the same name in their
+	// Build/BuildConfig.
+	ImageLabels []buildv1.ImageLabel `json:"imageLabels,omitempty"`
+
+	// nodeSelector is a selector which must be true for the build pod to fit on a node
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+	// annotations are annotations that will be added to the build pod
+	Annotations map[string]string `json:"annotations,omitempty"`
+
+	// resources defines resource requirements to execute the build.
+	Resources corev1.ResourceRequirements `json:"resources,omitempty"`
+}
+
+// SourceStrategyDefaultsConfig contains values that apply to builds using the
+// source strategy.
+type SourceStrategyDefaultsConfig struct {
+
+	// incremental indicates if s2i build strategies should perform an incremental
+	// build or not
+	Incremental *bool `json:"incremental,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BuildOverridesConfig controls override settings for builds
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+// +openshift:compatibility-gen:internal
+type BuildOverridesConfig struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// forcePull overrides, if set, the equivalent value in the builds,
+	// i.e. false disables force pull for all builds,
+	// true enables force pull for all builds,
+	// independently of what each build specifies itself
+	// +optional
+	ForcePull *bool `json:"forcePull,omitempty"`
+
+	// imageLabels is a list of labels that are applied to the resulting image.
+	// If the user provided a label in their Build/BuildConfig with the same name as one in this
+	// list, the user's label will be overwritten.
+	ImageLabels []buildv1.ImageLabel `json:"imageLabels,omitempty"`
+
+	// nodeSelector is a selector which must be true for the build pod to fit on a node
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+	// annotations are annotations that will be added to the build pod
+	Annotations map[string]string `json:"annotations,omitempty"`
+
+	// tolerations is a list of Tolerations that will override any existing
+	// tolerations set on a build pod.
+	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+}
+
+// ImageConfig holds the necessary configuration options for building image names for system components
+type ImageConfig struct {
+	// Format is the format of the name to be built for the system component
+	Format string `json:"format"`
+	// Latest determines if the latest tag will be pulled from the registry
+	Latest bool `json:"latest"`
+}
+
+// ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for
+// pods fulfilling a service to serve with.
+type ServiceServingCert struct {
+	// Signer holds the signing information used to automatically sign serving certificates.
+	// If this value is nil, then certs are not signed automatically.
+	Signer *configv1.CertInfo `json:"signer"`
+}
+
+// ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips.
+type ClusterNetworkEntry struct {
+	// CIDR defines the total range of a cluster network's address space.
+	CIDR string `json:"cidr"`
+	// HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node.
e.g., 8 would mean that each node would have a /24 slice of the overlay network for its pod.
+	HostSubnetLength uint32 `json:"hostSubnetLength"`
+}
+
+// SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled.
+type SecurityAllocator struct {
+	// UIDAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the
+	// block each namespace gets. For example, 1000-1999/10 will allocate ten UIDs per namespace, and will be able to allocate up to 100 blocks
+	// before running out of space. The default is to allocate from 1 billion to 2 billion in 10k blocks (which is the expected size of the
+	// ranges container images will use once user namespaces are started).
+	UIDAllocatorRange string `json:"uidAllocatorRange"`
+	// MCSAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is
+	// "<prefix>/<numberOfLabels>[,<maxCategory>]". The default is "s0/2" and will allocate from c0 -> c1023, which means a total of 535k labels
+	// are available (1024 choose 2 ~ 535k). If this value is changed after startup, new projects may receive labels that are already allocated
+	// to other projects. Prefix may be any valid SELinux set of terms (including user, role, and type), although leaving them as the default
+	// will allow the server to set them automatically.
+	//
+	// Examples:
+	// * s0:/2 - Allocate labels from s0:c0,c0 to s0:c511,c511
+	// * s0:/2,512 - Allocate labels from s0:c0,c0,c0 to s0:c511,c511,511
+	//
+	MCSAllocatorRange string `json:"mcsAllocatorRange"`
+	// MCSLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS
+	// ranges (100k namespaces, 535k/5 labels).
+	MCSLabelsPerProject int `json:"mcsLabelsPerProject"`
+}
diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..62de55ed4
--- /dev/null
+++ b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.deepcopy.go
@@ -0,0 +1,679 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	buildv1 "github.com/openshift/api/build/v1"
+	configv1 "github.com/openshift/api/config/v1"
+	corev1 "k8s.io/api/core/v1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIServers) DeepCopyInto(out *APIServers) {
+	*out = *in
+	if in.PerGroupOptions != nil {
+		in, out := &in.PerGroupOptions, &out.PerGroupOptions
+		*out = make([]PerGroupOptions, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServers.
+func (in *APIServers) DeepCopy() *APIServers {
+	if in == nil {
+		return nil
+	}
+	out := new(APIServers)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
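// Illustrative sketch (not part of the vendored patch): unlike the
// pointer-receiver methods elsewhere in this file, the generated methods for
// the named slice type AllowedRegistries use a value receiver and return a
// value. DeepCopy yields a new backing array, so edits to the copy never show
// through to the original. The registry hostnames are made-up examples.
package main

import (
	"fmt"

	openshiftcontrolplanev1 "github.com/openshift/api/openshiftcontrolplane/v1"
)

func main() {
	orig := openshiftcontrolplanev1.AllowedRegistries{
		{DomainName: "registry.example.com"},
	}

	cp := orig.DeepCopy()
	cp[0].DomainName = "mirror.example.com" // only the copy changes

	fmt.Println(orig[0].DomainName) // prints "registry.example.com"
}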
+func (in AllowedRegistries) DeepCopyInto(out *AllowedRegistries) { + { + in := &in + *out = make(AllowedRegistries, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedRegistries. +func (in AllowedRegistries) DeepCopy() AllowedRegistries { + if in == nil { + return nil + } + out := new(AllowedRegistries) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildControllerConfig) DeepCopyInto(out *BuildControllerConfig) { + *out = *in + out.ImageTemplateFormat = in.ImageTemplateFormat + if in.BuildDefaults != nil { + in, out := &in.BuildDefaults, &out.BuildDefaults + *out = new(BuildDefaultsConfig) + (*in).DeepCopyInto(*out) + } + if in.BuildOverrides != nil { + in, out := &in.BuildOverrides, &out.BuildOverrides + *out = new(BuildOverridesConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildControllerConfig. +func (in *BuildControllerConfig) DeepCopy() *BuildControllerConfig { + if in == nil { + return nil + } + out := new(BuildControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BuildDefaultsConfig) DeepCopyInto(out *BuildDefaultsConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]corev1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.SourceStrategyDefaults != nil { + in, out := &in.SourceStrategyDefaults, &out.SourceStrategyDefaults + *out = new(SourceStrategyDefaultsConfig) + (*in).DeepCopyInto(*out) + } + if in.ImageLabels != nil { + in, out := &in.ImageLabels, &out.ImageLabels + *out = make([]buildv1.ImageLabel, len(*in)) + copy(*out, *in) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Resources.DeepCopyInto(&out.Resources) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildDefaultsConfig. +func (in *BuildDefaultsConfig) DeepCopy() *BuildDefaultsConfig { + if in == nil { + return nil + } + out := new(BuildDefaultsConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildDefaultsConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BuildOverridesConfig) DeepCopyInto(out *BuildOverridesConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.ForcePull != nil { + in, out := &in.ForcePull, &out.ForcePull + *out = new(bool) + **out = **in + } + if in.ImageLabels != nil { + in, out := &in.ImageLabels, &out.ImageLabels + *out = make([]buildv1.ImageLabel, len(*in)) + copy(*out, *in) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]corev1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildOverridesConfig. +func (in *BuildOverridesConfig) DeepCopy() *BuildOverridesConfig { + if in == nil { + return nil + } + out := new(BuildOverridesConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BuildOverridesConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterNetworkEntry) DeepCopyInto(out *ClusterNetworkEntry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkEntry. +func (in *ClusterNetworkEntry) DeepCopy() *ClusterNetworkEntry { + if in == nil { + return nil + } + out := new(ClusterNetworkEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeployerControllerConfig) DeepCopyInto(out *DeployerControllerConfig) { + *out = *in + out.ImageTemplateFormat = in.ImageTemplateFormat + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeployerControllerConfig. +func (in *DeployerControllerConfig) DeepCopy() *DeployerControllerConfig { + if in == nil { + return nil + } + out := new(DeployerControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DockerPullSecretControllerConfig) DeepCopyInto(out *DockerPullSecretControllerConfig) { + *out = *in + if in.RegistryURLs != nil { + in, out := &in.RegistryURLs, &out.RegistryURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerPullSecretControllerConfig. +func (in *DockerPullSecretControllerConfig) DeepCopy() *DockerPullSecretControllerConfig { + if in == nil { + return nil + } + out := new(DockerPullSecretControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *FrontProxyConfig) DeepCopyInto(out *FrontProxyConfig) { + *out = *in + if in.AllowedNames != nil { + in, out := &in.AllowedNames, &out.AllowedNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.UsernameHeaders != nil { + in, out := &in.UsernameHeaders, &out.UsernameHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.GroupHeaders != nil { + in, out := &in.GroupHeaders, &out.GroupHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExtraHeaderPrefixes != nil { + in, out := &in.ExtraHeaderPrefixes, &out.ExtraHeaderPrefixes + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FrontProxyConfig. +func (in *FrontProxyConfig) DeepCopy() *FrontProxyConfig { + if in == nil { + return nil + } + out := new(FrontProxyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageConfig) DeepCopyInto(out *ImageConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageConfig. +func (in *ImageConfig) DeepCopy() *ImageConfig { + if in == nil { + return nil + } + out := new(ImageConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageImportControllerConfig) DeepCopyInto(out *ImageImportControllerConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageImportControllerConfig. +func (in *ImageImportControllerConfig) DeepCopy() *ImageImportControllerConfig { + if in == nil { + return nil + } + out := new(ImageImportControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImagePolicyConfig) DeepCopyInto(out *ImagePolicyConfig) { + *out = *in + if in.AllowedRegistriesForImport != nil { + in, out := &in.AllowedRegistriesForImport, &out.AllowedRegistriesForImport + *out = make(AllowedRegistries, len(*in)) + copy(*out, *in) + } + if in.ExternalRegistryHostnames != nil { + in, out := &in.ExternalRegistryHostnames, &out.ExternalRegistryHostnames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImagePolicyConfig. +func (in *ImagePolicyConfig) DeepCopy() *ImagePolicyConfig { + if in == nil { + return nil + } + out := new(ImagePolicyConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerConfig) DeepCopyInto(out *IngressControllerConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerConfig. +func (in *IngressControllerConfig) DeepCopy() *IngressControllerConfig { + if in == nil { + return nil + } + out := new(IngressControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JenkinsPipelineConfig) DeepCopyInto(out *JenkinsPipelineConfig) { + *out = *in + if in.AutoProvisionEnabled != nil { + in, out := &in.AutoProvisionEnabled, &out.AutoProvisionEnabled + *out = new(bool) + **out = **in + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JenkinsPipelineConfig. +func (in *JenkinsPipelineConfig) DeepCopy() *JenkinsPipelineConfig { + if in == nil { + return nil + } + out := new(JenkinsPipelineConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkControllerConfig) DeepCopyInto(out *NetworkControllerConfig) { + *out = *in + if in.ClusterNetworks != nil { + in, out := &in.ClusterNetworks, &out.ClusterNetworks + *out = make([]ClusterNetworkEntry, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkControllerConfig. +func (in *NetworkControllerConfig) DeepCopy() *NetworkControllerConfig { + if in == nil { + return nil + } + out := new(NetworkControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenShiftAPIServerConfig) DeepCopyInto(out *OpenShiftAPIServerConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.GenericAPIServerConfig.DeepCopyInto(&out.GenericAPIServerConfig) + in.AggregatorConfig.DeepCopyInto(&out.AggregatorConfig) + in.ImagePolicyConfig.DeepCopyInto(&out.ImagePolicyConfig) + out.ProjectConfig = in.ProjectConfig + out.RoutingConfig = in.RoutingConfig + in.JenkinsPipelineConfig.DeepCopyInto(&out.JenkinsPipelineConfig) + if in.APIServerArguments != nil { + in, out := &in.APIServerArguments, &out.APIServerArguments + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + in.APIServers.DeepCopyInto(&out.APIServers) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftAPIServerConfig. +func (in *OpenShiftAPIServerConfig) DeepCopy() *OpenShiftAPIServerConfig { + if in == nil { + return nil + } + out := new(OpenShiftAPIServerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenShiftAPIServerConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OpenShiftControllerManagerConfig) DeepCopyInto(out *OpenShiftControllerManagerConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + out.KubeClientConfig = in.KubeClientConfig + if in.ServingInfo != nil { + in, out := &in.ServingInfo, &out.ServingInfo + *out = new(configv1.HTTPServingInfo) + (*in).DeepCopyInto(*out) + } + out.LeaderElection = in.LeaderElection + if in.Controllers != nil { + in, out := &in.Controllers, &out.Controllers + *out = make([]string, len(*in)) + copy(*out, *in) + } + out.ResourceQuota = in.ResourceQuota + in.ServiceServingCert.DeepCopyInto(&out.ServiceServingCert) + out.Deployer = in.Deployer + in.Build.DeepCopyInto(&out.Build) + in.ServiceAccount.DeepCopyInto(&out.ServiceAccount) + in.DockerPullSecret.DeepCopyInto(&out.DockerPullSecret) + in.Network.DeepCopyInto(&out.Network) + out.Ingress = in.Ingress + out.ImageImport = in.ImageImport + out.SecurityAllocator = in.SecurityAllocator + if in.FeatureGates != nil { + in, out := &in.FeatureGates, &out.FeatureGates + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenShiftControllerManagerConfig. +func (in *OpenShiftControllerManagerConfig) DeepCopy() *OpenShiftControllerManagerConfig { + if in == nil { + return nil + } + out := new(OpenShiftControllerManagerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenShiftControllerManagerConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PerGroupOptions) DeepCopyInto(out *PerGroupOptions) { + *out = *in + if in.EnabledVersions != nil { + in, out := &in.EnabledVersions, &out.EnabledVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DisabledVersions != nil { + in, out := &in.DisabledVersions, &out.DisabledVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PerGroupOptions. +func (in *PerGroupOptions) DeepCopy() *PerGroupOptions { + if in == nil { + return nil + } + out := new(PerGroupOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectConfig) DeepCopyInto(out *ProjectConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectConfig. +func (in *ProjectConfig) DeepCopy() *ProjectConfig { + if in == nil { + return nil + } + out := new(ProjectConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RegistryLocation) DeepCopyInto(out *RegistryLocation) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistryLocation. +func (in *RegistryLocation) DeepCopy() *RegistryLocation { + if in == nil { + return nil + } + out := new(RegistryLocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
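Pointer fields in these generated methods take one of two forms: a plain dereference copy (`**out = **in`) when the pointed-to type has only value fields, and a recursive DeepCopyInto call when the pointed-to type has reference fields of its own, as with ServingInfo above. A sketch of both forms on hypothetical types:

```go
package main

import "fmt"

// Hypothetical stand-ins: Leaf has only value fields, Nested carries its
// own reference field, so both pointer-copy forms used above appear here.
type Leaf struct{ CertFile string }

type Nested struct{ Names []string }

func (in *Nested) DeepCopyInto(out *Nested) {
	*out = *in
	if in.Names != nil {
		out.Names = make([]string, len(in.Names))
		copy(out.Names, in.Names)
	}
}

type Config struct {
	Signer  *Leaf   // copied with a plain dereference
	Serving *Nested // copied via DeepCopyInto
}

func (in *Config) DeepCopyInto(out *Config) {
	*out = *in
	if in.Signer != nil {
		out.Signer = new(Leaf)
		*out.Signer = *in.Signer // safe: Leaf holds no references
	}
	if in.Serving != nil {
		out.Serving = new(Nested)
		in.Serving.DeepCopyInto(out.Serving) // recurse for deep fields
	}
}

func main() {
	a := Config{Signer: &Leaf{"ca.crt"}, Serving: &Nested{Names: []string{"x"}}}
	var b Config
	a.DeepCopyInto(&b)
	fmt.Println(a.Signer != b.Signer, a.Serving.Names[0]) // true x
}
```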
+func (in *ResourceQuotaControllerConfig) DeepCopyInto(out *ResourceQuotaControllerConfig) { + *out = *in + out.SyncPeriod = in.SyncPeriod + out.MinResyncPeriod = in.MinResyncPeriod + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaControllerConfig. +func (in *ResourceQuotaControllerConfig) DeepCopy() *ResourceQuotaControllerConfig { + if in == nil { + return nil + } + out := new(ResourceQuotaControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoutingConfig) DeepCopyInto(out *RoutingConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutingConfig. +func (in *RoutingConfig) DeepCopy() *RoutingConfig { + if in == nil { + return nil + } + out := new(RoutingConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityAllocator) DeepCopyInto(out *SecurityAllocator) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityAllocator. +func (in *SecurityAllocator) DeepCopy() *SecurityAllocator { + if in == nil { + return nil + } + out := new(SecurityAllocator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountControllerConfig) DeepCopyInto(out *ServiceAccountControllerConfig) { + *out = *in + if in.ManagedNames != nil { + in, out := &in.ManagedNames, &out.ManagedNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountControllerConfig. +func (in *ServiceAccountControllerConfig) DeepCopy() *ServiceAccountControllerConfig { + if in == nil { + return nil + } + out := new(ServiceAccountControllerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceServingCert) DeepCopyInto(out *ServiceServingCert) { + *out = *in + if in.Signer != nil { + in, out := &in.Signer, &out.Signer + *out = new(configv1.CertInfo) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceServingCert. +func (in *ServiceServingCert) DeepCopy() *ServiceServingCert { + if in == nil { + return nil + } + out := new(ServiceServingCert) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SourceStrategyDefaultsConfig) DeepCopyInto(out *SourceStrategyDefaultsConfig) { + *out = *in + if in.Incremental != nil { + in, out := &in.Incremental, &out.Incremental + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SourceStrategyDefaultsConfig. 
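The hunk that follows vendors zz_generated.swagger_doc_generated.go, whose SwaggerDoc() methods return plain field-to-docstring maps keyed by JSON field name, with the "" key holding the type-level doc. A sketch of how a consumer might walk such a map, using a hypothetical stand-in type rather than the vendored ones:

```go
package main

import "fmt"

// swaggerDoced is the shape the generated SwaggerDoc() methods satisfy;
// go-restful-based doc generators look for exactly this method set.
type swaggerDoced interface {
	SwaggerDoc() map[string]string
}

// RoutingConfig here is a hypothetical stand-in mirroring the vendored pattern.
type RoutingConfig struct {
	Subdomain string `json:"subdomain"`
}

var map_RoutingConfig = map[string]string{
	"":          "RoutingConfig holds configuration options for routing to subdomains",
	"subdomain": "subdomain is the suffix appended to $service.$namespace.",
}

func (RoutingConfig) SwaggerDoc() map[string]string { return map_RoutingConfig }

// describe prints the type-level doc (the "" key) and each field doc.
func describe(d swaggerDoced) {
	docs := d.SwaggerDoc()
	fmt.Println("type:", docs[""])
	for field, doc := range docs {
		if field != "" {
			fmt.Printf("  %s: %s\n", field, doc)
		}
	}
}

func main() { describe(RoutingConfig{}) }
```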
+func (in *SourceStrategyDefaultsConfig) DeepCopy() *SourceStrategyDefaultsConfig { + if in == nil { + return nil + } + out := new(SourceStrategyDefaultsConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..b50508a63 --- /dev/null +++ b/vendor/github.com/openshift/api/openshiftcontrolplane/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,257 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_APIServers = map[string]string{ + "perGroupOptions": "perGroupOptions is a list of enabled/disabled API servers in addition to the defaults", +} + +func (APIServers) SwaggerDoc() map[string]string { + return map_APIServers +} + +var map_BuildControllerConfig = map[string]string{ + "additionalTrustedCA": "additionalTrustedCA is a path to a pem bundle file containing additional CAs that should be trusted for image pushes and pulls during builds.", +} + +func (BuildControllerConfig) SwaggerDoc() map[string]string { + return map_BuildControllerConfig +} + +var map_BuildDefaultsConfig = map[string]string{ + "": "BuildDefaultsConfig controls the default information for Builds\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "gitHTTPProxy": "gitHTTPProxy is the location of the HTTPProxy for Git source", + "gitHTTPSProxy": "gitHTTPSProxy is the location of the HTTPSProxy for Git source", + "gitNoProxy": "gitNoProxy is the list of domains for which the proxy should not be used", + "env": "env is a set of default environment variables that will be applied to the build if the specified variables do not exist on the build", + "sourceStrategyDefaults": "sourceStrategyDefaults are default values that apply to builds using the source strategy.", + "imageLabels": "imageLabels is a list of labels that are applied to the resulting image. User can override a default label by providing a label with the same name in their Build/BuildConfig.", + "nodeSelector": "nodeSelector is a selector which must be true for the build pod to fit on a node", + "annotations": "annotations are annotations that will be added to the build pod", + "resources": "resources defines resource requirements to execute the build.", +} + +func (BuildDefaultsConfig) SwaggerDoc() map[string]string { + return map_BuildDefaultsConfig +} + +var map_BuildOverridesConfig = map[string]string{ + "": "BuildOverridesConfig controls override settings for builds\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "forcePull": "forcePull overrides, if set, the equivalent value in the builds, i.e. 
false disables force pull for all builds, true enables force pull for all builds, independently of what each build specifies itself", + "imageLabels": "imageLabels is a list of labels that are applied to the resulting image. If user provided a label in their Build/BuildConfig with the same name as one in this list, the user's label will be overwritten.", + "nodeSelector": "nodeSelector is a selector which must be true for the build pod to fit on a node", + "annotations": "annotations are annotations that will be added to the build pod", + "tolerations": "tolerations is a list of Tolerations that will override any existing tolerations set on a build pod.", +} + +func (BuildOverridesConfig) SwaggerDoc() map[string]string { + return map_BuildOverridesConfig +} + +var map_ClusterNetworkEntry = map[string]string{ + "": "ClusterNetworkEntry defines an individual cluster network. The CIDRs cannot overlap with other cluster network CIDRs, CIDRs reserved for external ips, CIDRs reserved for service networks, and CIDRs reserved for ingress ips.", + "cidr": "CIDR defines the total range of a cluster networks address space.", + "hostSubnetLength": "HostSubnetLength is the number of bits of the accompanying CIDR address to allocate to each node. eg, 8 would mean that each node would have a /24 slice of the overlay network for its pod.", +} + +func (ClusterNetworkEntry) SwaggerDoc() map[string]string { + return map_ClusterNetworkEntry +} + +var map_DockerPullSecretControllerConfig = map[string]string{ + "registryURLs": "registryURLs is a list of urls that the docker pull secrets should be valid for.", + "internalRegistryHostname": "internalRegistryHostname is the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format. Docker pull secrets will be generated for this registry.", +} + +func (DockerPullSecretControllerConfig) SwaggerDoc() map[string]string { + return map_DockerPullSecretControllerConfig +} + +var map_FrontProxyConfig = map[string]string{ + "clientCA": "clientCA is a path to the CA bundle to use to verify the common name of the front proxy's client cert", + "allowedNames": "allowedNames is an optional list of common names to require a match from.", + "usernameHeaders": "usernameHeaders is the set of headers to check for the username", + "groupHeaders": "groupHeaders is the set of headers to check for groups", + "extraHeaderPrefixes": "extraHeaderPrefixes is the set of header prefixes to check for user extra", +} + +func (FrontProxyConfig) SwaggerDoc() map[string]string { + return map_FrontProxyConfig +} + +var map_ImageConfig = map[string]string{ + "": "ImageConfig holds the necessary configuration options for building image names for system components", + "format": "Format is the format of the name to be built for the system component", + "latest": "Latest determines if the latest tag will be pulled from the registry", +} + +func (ImageConfig) SwaggerDoc() map[string]string { + return map_ImageConfig +} + +var map_ImageImportControllerConfig = map[string]string{ + "maxScheduledImageImportsPerMinute": "maxScheduledImageImportsPerMinute is the maximum number of image streams that will be imported in the background per minute. The default value is 60. 
Set to -1 for unlimited.", + "disableScheduledImport": "disableScheduledImport allows scheduled background import of images to be disabled.", + "scheduledImageImportMinimumIntervalSeconds": "scheduledImageImportMinimumIntervalSeconds is the minimum number of seconds that can elapse between when image streams scheduled for background import are checked against the upstream repository. The default value is 15 minutes.", +} + +func (ImageImportControllerConfig) SwaggerDoc() map[string]string { + return map_ImageImportControllerConfig +} + +var map_ImagePolicyConfig = map[string]string{ + "maxImagesBulkImportedPerRepository": "maxImagesBulkImportedPerRepository controls the number of images that are imported when a user does a bulk import of a container repository. This number is set low to prevent users from importing large numbers of images accidentally. Set -1 for no limit.", + "allowedRegistriesForImport": "allowedRegistriesForImport limits the container image registries that normal users may import images from. Set this list to the registries that you trust to contain valid Docker images and that you want applications to be able to import from. Users with permission to create Images or ImageStreamMappings via the API are not affected by this policy - typically only administrators or system integrations will have those permissions.", + "internalRegistryHostname": "internalRegistryHostname sets the hostname for the default internal image registry. The value must be in \"hostname[:port]\" format. For backward compatibility, users can still use OPENSHIFT_DEFAULT_REGISTRY environment variable but this setting overrides the environment variable.", + "externalRegistryHostnames": "externalRegistryHostnames provides the hostnames for the default external image registry. The external hostname should be set only when the image registry is exposed externally. The first value is used in 'publicDockerImageRepository' field in ImageStreams. The value must be in \"hostname[:port]\" format.", + "additionalTrustedCA": "additionalTrustedCA is a path to a pem bundle file containing additional CAs that should be trusted during imagestream import.", +} + +func (ImagePolicyConfig) SwaggerDoc() map[string]string { + return map_ImagePolicyConfig +} + +var map_IngressControllerConfig = map[string]string{ + "ingressIPNetworkCIDR": "ingressIPNetworkCIDR controls the range to assign ingress ips from for services of type LoadBalancer on bare metal. If empty, ingress ips will not be assigned. It may contain a single CIDR that will be allocated from. For security reasons, you should ensure that this range does not overlap with the CIDRs reserved for external ips, nodes, pods, or services.", +} + +func (IngressControllerConfig) SwaggerDoc() map[string]string { + return map_IngressControllerConfig +} + +var map_JenkinsPipelineConfig = map[string]string{ + "": "JenkinsPipelineConfig holds configuration for the Jenkins pipeline strategy", + "autoProvisionEnabled": "autoProvisionEnabled determines whether a Jenkins server will be spawned from the provided template when the first build config in the project with type JenkinsPipeline is created. 
When not specified this option defaults to true.", + "templateNamespace": "templateNamespace contains the namespace name where the Jenkins template is stored", + "templateName": "templateName is the name of the default Jenkins template", + "serviceName": "serviceName is the name of the Jenkins service OpenShift uses to detect whether a Jenkins pipeline handler has already been installed in a project. This value *must* match a service name in the provided template.", + "parameters": "parameters specifies a set of optional parameters to the Jenkins template.", +} + +func (JenkinsPipelineConfig) SwaggerDoc() map[string]string { + return map_JenkinsPipelineConfig +} + +var map_NetworkControllerConfig = map[string]string{ + "": "MasterNetworkConfig to be passed to the compiled in network plugin", + "clusterNetworks": "clusterNetworks contains a list of cluster networks that defines the global overlay networks L3 space.", +} + +func (NetworkControllerConfig) SwaggerDoc() map[string]string { + return map_NetworkControllerConfig +} + +var map_OpenShiftAPIServerConfig = map[string]string{ + "": "Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "aggregatorConfig": "aggregatorConfig contains information about how to verify the aggregator front proxy", + "imagePolicyConfig": "imagePolicyConfig feeds the image policy admission plugin", + "projectConfig": "projectConfig feeds an admission plugin", + "routingConfig": "routingConfig holds information about routing and route generation", + "serviceAccountOAuthGrantMethod": "serviceAccountOAuthGrantMethod is used for determining client authorization for service account oauth client. It must be either: deny, prompt, or \"\"", + "jenkinsPipelineConfig": "jenkinsPipelineConfig holds information about the default Jenkins template used for JenkinsPipeline build strategy.", + "cloudProviderFile": "cloudProviderFile points to the cloud config file", + "apiServers": "apiServers holds information about enabled/disabled API servers", +} + +func (OpenShiftAPIServerConfig) SwaggerDoc() map[string]string { + return map_OpenShiftAPIServerConfig +} + +var map_OpenShiftControllerManagerConfig = map[string]string{ + "": "Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "servingInfo": "servingInfo describes how to start serving", + "leaderElection": "leaderElection defines the configuration for electing a controller instance to make changes to the cluster. If unspecified, the ControllerTTL value is checked to determine whether the legacy direct etcd election code will be used.", + "controllers": "controllers is a list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller \"+ named 'foo', '-foo' disables the controller named 'foo'. Defaults to \"*\".", + "featureGates": "featureGates are the set of extra OpenShift feature gates for openshift-controller-manager. 
These feature gates can be used to enable features that are tech preview or otherwise not available on OpenShift by default.", +} + +func (OpenShiftControllerManagerConfig) SwaggerDoc() map[string]string { + return map_OpenShiftControllerManagerConfig +} + +var map_PerGroupOptions = map[string]string{ + "name": "name is an API server name (see OpenShiftAPIserverName typed constants for a complete list of available API servers).", + "enabledVersions": "enabledVersions is a list of versions that must be enabled in addition to the defaults. Must not collide with the list of disabled versions", + "disabledVersions": "disabledVersions is a list of versions that must be disabled in addition to the defaults. Must not collide with the list of enabled versions", +} + +func (PerGroupOptions) SwaggerDoc() map[string]string { + return map_PerGroupOptions +} + +var map_ProjectConfig = map[string]string{ + "defaultNodeSelector": "defaultNodeSelector holds default project node label selector", + "projectRequestMessage": "projectRequestMessage is the string presented to a user if they are unable to request a project via the projectrequest api endpoint", + "projectRequestTemplate": "projectRequestTemplate is the template to use for creating projects in response to projectrequest. It is in the format namespace/template and it is optional. If it is not specified, a default template is used.", +} + +func (ProjectConfig) SwaggerDoc() map[string]string { + return map_ProjectConfig +} + +var map_RegistryLocation = map[string]string{ + "": "RegistryLocation contains a location of the registry specified by the registry domain name. The domain name might include wildcards, like '*' or '??'.", + "domainName": "DomainName specifies a domain name for the registry In case the registry use non-standard (80 or 443) port, the port should be included in the domain name as well.", + "insecure": "Insecure indicates whether the registry is secure (https) or insecure (http) By default (if not specified) the registry is assumed as secure.", +} + +func (RegistryLocation) SwaggerDoc() map[string]string { + return map_RegistryLocation +} + +var map_RoutingConfig = map[string]string{ + "": "RoutingConfig holds the necessary configuration options for routing to subdomains", + "subdomain": "subdomain is the suffix appended to $service.$namespace. to form the default route hostname DEPRECATED: This field is being replaced by routers setting their own defaults. This is the \"default\" route.", +} + +func (RoutingConfig) SwaggerDoc() map[string]string { + return map_RoutingConfig +} + +var map_SecurityAllocator = map[string]string{ + "": "SecurityAllocator controls the automatic allocation of UIDs and MCS labels to a project. If nil, allocation is disabled.", + "uidAllocatorRange": "UIDAllocatorRange defines the total set of Unix user IDs (UIDs) that will be allocated to projects automatically, and the size of the block each namespace gets. For example, 1000-1999/10 will allocate ten UIDs per namespace, and will be able to allocate up to 100 blocks before running out of space. The default is to allocate from 1 billion to 2 billion in 10k blocks (which is the expected size of the ranges container images will use once user namespaces are started).", + "mcsAllocatorRange": "MCSAllocatorRange defines the range of MCS categories that will be assigned to namespaces. The format is \"/[,]\". The default is \"s0/2\" and will allocate from c0 -> c1023, which means a total of 535k labels are available (1024 choose 2 ~ 535k). 
If this value is changed after startup, new projects may receive labels that are already allocated to other projects. Prefix may be any valid SELinux set of terms (including user, role, and type), although leaving them as the default will allow the server to set them automatically.\n\nExamples: * s0:/2 - Allocate labels from s0:c0,c0 to s0:c511,c511 * s0:/2,512 - Allocate labels from s0:c0,c0,c0 to s0:c511,c511,511",
+	"mcsLabelsPerProject": "MCSLabelsPerProject defines the number of labels that should be reserved per project. The default is 5 to match the default UID and MCS ranges (100k namespaces, 535k/5 labels).",
+}
+
+func (SecurityAllocator) SwaggerDoc() map[string]string {
+	return map_SecurityAllocator
+}
+
+var map_ServiceAccountControllerConfig = map[string]string{
+	"managedNames": "managedNames is a list of service account names that will be auto-created in every namespace. If no names are specified, the ServiceAccountsController will not be started.",
+}
+
+func (ServiceAccountControllerConfig) SwaggerDoc() map[string]string {
+	return map_ServiceAccountControllerConfig
+}
+
+var map_ServiceServingCert = map[string]string{
+	"":       "ServiceServingCert holds configuration for service serving cert signer which creates cert/key pairs for pods fulfilling a service to serve with.",
+	"signer": "Signer holds the signing information used to automatically sign serving certificates. If this value is nil, then certs are not signed automatically.",
+}
+
+func (ServiceServingCert) SwaggerDoc() map[string]string {
+	return map_ServiceServingCert
+}
+
+var map_SourceStrategyDefaultsConfig = map[string]string{
+	"":            "SourceStrategyDefaultsConfig contains values that apply to builds using the source strategy.",
+	"incremental": "incremental indicates if s2i build strategies should perform an incremental build or not",
+}
+
+func (SourceStrategyDefaultsConfig) SwaggerDoc() map[string]string {
+	return map_SourceStrategyDefaultsConfig
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/operator/.codegen.yaml b/vendor/github.com/openshift/api/operator/.codegen.yaml
new file mode 100644
index 000000000..0791e8be7
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/.codegen.yaml
@@ -0,0 +1,8 @@
+schemapatch:
+  requiredFeatureSets:
+  - ""
+  - "Default"
+  - "TechPreviewNoUpgrade"
+  - "CustomNoUpgrade"
+swaggerdocs:
+  commentPolicy: Warn
diff --git a/vendor/github.com/openshift/api/operator/install.go b/vendor/github.com/openshift/api/operator/install.go
new file mode 100644
index 000000000..9cbf25a4b
--- /dev/null
+++ b/vendor/github.com/openshift/api/operator/install.go
@@ -0,0 +1,27 @@
+package operator
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	operatorv1 "github.com/openshift/api/operator/v1"
+	operatorv1alpha1 "github.com/openshift/api/operator/v1alpha1"
+)
+
+const (
+	GroupName = "operator.openshift.io"
+)
+
+var (
+	schemeBuilder = runtime.NewSchemeBuilder(operatorv1alpha1.Install, operatorv1.Install)
+	// Install is a function which adds every version of this group to a scheme
+	Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+	return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+	return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/operator/v1/0000_10_config-operator_01_config.crd.yaml
b/vendor/github.com/openshift/api/operator/v1/0000_10_config-operator_01_config.crd.yaml index b137f2434..35359737c 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_10_config-operator_01_config.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_10_config-operator_01_config.crd.yaml @@ -67,7 +67,7 @@ spec: - Trace - TraceAll unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. type: object nullable: true x-kubernetes-preserve-unknown-fields: true diff --git a/vendor/github.com/openshift/api/operator/v1/0000_12_etcd-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_12_etcd-operator_01_config.crd.yaml index ff4dc1c8a..813d03ca7 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_12_etcd-operator_01_config.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_12_etcd-operator_01_config.crd.yaml @@ -77,7 +77,7 @@ spec: type: integer format: int32 unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. type: object nullable: true x-kubernetes-preserve-unknown-fields: true diff --git a/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml index c31f076cd..c9d56f599 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_20_kube-apiserver-operator_01_config.crd.yaml @@ -72,7 +72,7 @@ spec: format: int32 type: integer unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. 
Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. nullable: true type: object x-kubernetes-preserve-unknown-fields: true diff --git a/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml index 5562b60cd..745b23d2b 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-controller-manager-operator_01_config.crd.yaml @@ -74,7 +74,7 @@ spec: format: int32 type: integer unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. nullable: true type: object x-kubernetes-preserve-unknown-fields: true diff --git a/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml index db68407ac..389c31e12 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_25_kube-scheduler-operator_01_config.crd.yaml @@ -74,7 +74,7 @@ spec: format: int32 type: integer unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. nullable: true type: object x-kubernetes-preserve-unknown-fields: true diff --git a/vendor/github.com/openshift/api/operator/v1/0000_30_openshift-apiserver-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_30_openshift-apiserver-operator_01_config.crd.yaml index 937718b77..5a28d5654 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_30_openshift-apiserver-operator_01_config.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_30_openshift-apiserver-operator_01_config.crd.yaml @@ -67,7 +67,7 @@ spec: - Trace - TraceAll unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. 
observedConfig 3. unsupportedConfigOverrides' + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. type: object nullable: true x-kubernetes-preserve-unknown-fields: true diff --git a/vendor/github.com/openshift/api/operator/v1/0000_40_cloud-credential-operator_00_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_40_cloud-credential-operator_00_config.crd.yaml index 0515ed8ff..bce7c8c7e 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_40_cloud-credential-operator_00_config.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_40_cloud-credential-operator_00_config.crd.yaml @@ -3,6 +3,7 @@ kind: CustomResourceDefinition metadata: annotations: api-approved.openshift.io: https://github.com/openshift/api/pull/692 + include.release.openshift.io/ibm-cloud-managed: "true" include.release.openshift.io/self-managed-high-availability: "true" include.release.openshift.io/single-node-developer: "true" name: cloudcredentials.operator.openshift.io @@ -73,7 +74,7 @@ spec: - Trace - TraceAll unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. type: object nullable: true x-kubernetes-preserve-unknown-fields: true diff --git a/vendor/github.com/openshift/api/operator/v1/0000_40_kube-storage-version-migrator-operator_00_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_40_kube-storage-version-migrator-operator_00_config.crd.yaml index befa175b7..49a62e702 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_40_kube-storage-version-migrator-operator_00_config.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_40_kube-storage-version-migrator-operator_00_config.crd.yaml @@ -65,7 +65,7 @@ spec: - Trace - TraceAll unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. 
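The install.go vendored a few hunks above wires both versions of the operator.openshift.io group into a scheme. A short usage sketch, assuming only what that file itself exports (Install, Resource, Kind):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	"github.com/openshift/api/operator"
)

func main() {
	// Install registers every version of the operator.openshift.io group
	// (v1alpha1 and v1) with the scheme, per the schemeBuilder above.
	scheme := runtime.NewScheme()
	if err := operator.Install(scheme); err != nil {
		panic(err)
	}

	// Resource and Kind build group-qualified names from bare strings.
	fmt.Println(operator.Resource("ingresscontrollers")) // ingresscontrollers.operator.openshift.io
	fmt.Println(operator.Kind("IngressController"))      // IngressController.operator.openshift.io
}
```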
type: object nullable: true x-kubernetes-preserve-unknown-fields: true diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-authentication-operator_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-authentication-operator_01_config.crd.yaml index 1efa2d46e..2464d26cd 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-authentication-operator_01_config.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-authentication-operator_01_config.crd.yaml @@ -63,7 +63,7 @@ spec: - Trace - TraceAll unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. type: object nullable: true x-kubernetes-preserve-unknown-fields: true diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-openshift-controller-manager-operator_02_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-openshift-controller-manager-operator_02_config.crd.yaml index 64b1e93ba..740b7943d 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-openshift-controller-manager-operator_02_config.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster-openshift-controller-manager-operator_02_config.crd.yaml @@ -66,7 +66,7 @@ spec: - Trace - TraceAll unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. type: object nullable: true x-kubernetes-preserve-unknown-fields: true diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster_storage_operator_01_crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster_storage_operator_01_crd.yaml index 2bf181862..80bb8eddf 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_50_cluster_storage_operator_01_crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_cluster_storage_operator_01_crd.yaml @@ -65,10 +65,20 @@ spec: - Trace - TraceAll unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. 
unsupportedConfigOverrides' + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. type: object nullable: true x-kubernetes-preserve-unknown-fields: true + vsphereStorageDriver: + description: 'VSphereStorageDriver indicates the storage driver to use on VSphere clusters. Once this field is set to CSIWithMigrationDriver, it can not be changed. If this is empty, the platform will choose a good default, which may change over time without notice. The current default is CSIWithMigrationDriver and may not be changed. DEPRECATED: This field will be removed in a future release.' + type: string + enum: + - "" + - LegacyDeprecatedInTreeDriver + - CSIWithMigrationDriver + x-kubernetes-validations: + - rule: self != "LegacyDeprecatedInTreeDriver" + message: VSphereStorageDriver can not be set to LegacyDeprecatedInTreeDriver status: description: status holds observed values from the cluster. They may not be overridden. type: object diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml index b6ff95a92..4ff57e35a 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_ingress-operator_00-ingresscontroller.crd.yaml @@ -161,8 +161,19 @@ spec: - Local type: string type: object + ibm: + description: "ibm provides configuration settings that are specific to IBM Cloud load balancers. \n If empty, defaults will be applied. See specific ibm fields for details about their defaults." + properties: + protocol: + description: "protocol specifies whether the load balancer uses PROXY protocol to forward connections to the IngressController. See \"service.kubernetes.io/ibm-load-balancer-cloud-provider-enable-features: \"proxy-protocol\"\" at https://cloud.ibm.com/docs/containers?topic=containers-vpc-lbaas\" \n PROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol. \n Valid values for protocol are TCP, PROXY and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is TCP, without the proxy protocol enabled." + enum: + - "" + - TCP + - PROXY + type: string + type: object type: - description: type is the underlying infrastructure provider for the load balancer. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Nutanix", "OpenStack", and "VSphere". 
+ description: type is the underlying infrastructure provider for the load balancer. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "IBM", "Nutanix", "OpenStack", and "VSphere". enum: - AWS - Azure @@ -250,6 +261,130 @@ spec: httpHeaders: description: "httpHeaders defines policy for HTTP headers. \n If this field is empty, the default values are used." properties: + actions: + description: 'actions specifies options for modifying headers and their values. Note that this option only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). Headers cannot be modified for TLS passthrough connections. Setting the HSTS (`Strict-Transport-Security`) header is not supported via actions. `Strict-Transport-Security` may only be configured using the "haproxy.router.openshift.io/hsts_header" route annotation, and only in accordance with the policy specified in Ingress.Spec.RequiredHSTSPolicies. Any actions defined here are applied after any actions related to the following other fields: cache-control, spec.clientTLS, spec.httpHeaders.forwardedHeaderPolicy, spec.httpHeaders.uniqueId, and spec.httpHeaders.headerNameCaseAdjustments. In case of HTTP request headers, the actions specified in spec.httpHeaders.actions on the Route will be executed after the actions specified in the IngressController''s spec.httpHeaders.actions field. In case of HTTP response headers, the actions specified in spec.httpHeaders.actions on the IngressController will be executed after the actions specified in the Route''s spec.httpHeaders.actions field. Headers set using this API cannot be captured for use in access logs. The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. Please refer to the documentation for that API field for more details.' + properties: + request: + description: 'request is a list of HTTP request headers to modify. Actions defined here will modify the request headers of all requests passing through an ingress controller. These actions are applied to all Routes i.e. for all connections handled by the ingress controller defined within a cluster. IngressController actions for request headers will be executed before Route actions. Currently, actions may define to either `Set` or `Delete` headers values. Actions are applied in sequence as defined in this list. A maximum of 20 request header actions may be configured. Sample fetchers allowed are "req.hdr" and "ssl_c_der". Converters allowed are "lower" and "base64". Example header values: "%[req.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]".' + items: + description: IngressControllerHTTPHeader specifies configuration for setting or deleting an HTTP header. + properties: + action: + description: action specifies actions to perform on headers, such as setting or deleting headers. + properties: + set: + description: set specifies how the HTTP header should be set. This field is required when type is Set and forbidden otherwise. + properties: + value: + description: value specifies a header value. Dynamic values can be added. 
The value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. The value of this field must be no more than 16384 characters in length. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. + maxLength: 16384 + minLength: 1 + type: string + required: + - value + type: object + type: + description: type defines the type of the action to be applied on the header. Possible values are Set or Delete. Set allows you to set HTTP request and response headers. Delete allows you to delete HTTP request and response headers. + enum: + - Set + - Delete + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: set is required when type is Set, and forbidden otherwise + rule: 'has(self.type) && self.type == ''Set'' ? has(self.set) : !has(self.set)' + name: + description: 'name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. The name must consist only of alphanumeric and the following special characters, "-!#$%&''*+.^_`". The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie. It must be no more than 255 characters in length. Header name must be unique.' + maxLength: 255 + minLength: 1 + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + type: string + x-kubernetes-validations: + - message: strict-transport-security header may not be modified via header actions + rule: self.lowerAscii() != 'strict-transport-security' + - message: proxy header may not be modified via header actions + rule: self.lowerAscii() != 'proxy' + - message: host header may not be modified via header actions + rule: self.lowerAscii() != 'host' + - message: cookie header may not be modified via header actions + rule: self.lowerAscii() != 'cookie' + - message: set-cookie header may not be modified via header actions + rule: self.lowerAscii() != 'set-cookie' + required: + - action + - name + type: object + maxItems: 20 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are req.hdr, ssl_c_der. Converters allowed are lower, base64. + rule: self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:req\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$'))) + response: + description: 'response is a list of HTTP response headers to modify. Actions defined here will modify the response headers of all requests passing through an ingress controller. These actions are applied to all Routes i.e. 
for all connections handled by the ingress controller defined within a cluster. IngressController actions for response headers will be executed after Route actions. Currently, actions may define to either `Set` or `Delete` headers values. Actions are applied in sequence as defined in this list. A maximum of 20 response header actions may be configured. Sample fetchers allowed are "res.hdr" and "ssl_c_der". Converters allowed are "lower" and "base64". Example header values: "%[res.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]".' + items: + description: IngressControllerHTTPHeader specifies configuration for setting or deleting an HTTP header. + properties: + action: + description: action specifies actions to perform on headers, such as setting or deleting headers. + properties: + set: + description: set specifies how the HTTP header should be set. This field is required when type is Set and forbidden otherwise. + properties: + value: + description: value specifies a header value. Dynamic values can be added. The value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. The value of this field must be no more than 16384 characters in length. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. + maxLength: 16384 + minLength: 1 + type: string + required: + - value + type: object + type: + description: type defines the type of the action to be applied on the header. Possible values are Set or Delete. Set allows you to set HTTP request and response headers. Delete allows you to delete HTTP request and response headers. + enum: + - Set + - Delete + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: set is required when type is Set, and forbidden otherwise + rule: 'has(self.type) && self.type == ''Set'' ? has(self.set) : !has(self.set)' + name: + description: 'name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. The name must consist only of alphanumeric and the following special characters, "-!#$%&''*+.^_`". The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie. It must be no more than 255 characters in length. Header name must be unique.' 
+ maxLength: 255 + minLength: 1 + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + type: string + x-kubernetes-validations: + - message: strict-transport-security header may not be modified via header actions + rule: self.lowerAscii() != 'strict-transport-security' + - message: proxy header may not be modified via header actions + rule: self.lowerAscii() != 'proxy' + - message: host header may not be modified via header actions + rule: self.lowerAscii() != 'host' + - message: cookie header may not be modified via header actions + rule: self.lowerAscii() != 'cookie' + - message: set-cookie header may not be modified via header actions + rule: self.lowerAscii() != 'set-cookie' + required: + - action + - name + type: object + maxItems: 20 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are res.hdr, ssl_c_der. Converters allowed are lower, base64. + rule: self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:res\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$'))) + type: object forwardedHeaderPolicy: description: "forwardedHeaderPolicy specifies when and how the IngressController sets the Forwarded, X-Forwarded-For, X-Forwarded-Host, X-Forwarded-Port, X-Forwarded-Proto, and X-Forwarded-Proto-Version HTTP headers. The value may be one of the following: \n * \"Append\", which specifies that the IngressController appends the headers, preserving existing headers. \n * \"Replace\", which specifies that the IngressController sets the headers, replacing any existing Forwarded or X-Forwarded-* headers. \n * \"IfNone\", which specifies that the IngressController sets the headers if they are not already set. \n * \"Never\", which specifies that the IngressController never sets the headers, preserving any existing headers. \n By default, the policy is \"Append\"." enum: @@ -296,6 +431,14 @@ spec: properties: container: description: container holds parameters for the Container logging destination. Present only if type is Container. + properties: + maxLength: + default: 1024 + description: "maxLength is the maximum length of the log message. \n Valid values are integers in the range 480 to 8192, inclusive. \n When omitted, the default value is 1024." + format: int32 + maximum: 8192 + minimum: 480 + type: integer type: object syslog: description: syslog holds parameters for a syslog endpoint. Present only if type is Syslog. @@ -340,7 +483,7 @@ spec: type: string maxLength: default: 1024 - description: "maxLength is the maximum length of the syslog message \n If this field is empty, the maxLength is set to \"1024\"." + description: "maxLength is the maximum length of the log message. \n Valid values are integers in the range 480 to 4096, inclusive. \n When omitted, the default value is 1024." 
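The x-kubernetes-validations rules above vet dynamic header values with a regular expression; CEL's matches() and Go's regexp package both use RE2, so the rule can be exercised locally. The sketch below transcribes the request-header pattern with the YAML double-escaping collapsed; treat that transcription, not the CRD itself, as the assumption here:

```go
package main

import (
	"fmt"
	"regexp"
)

// headerValueRE is the request-header value pattern from the CRD above,
// with the YAML/CEL double-escaping collapsed into Go raw-string form.
var headerValueRE = regexp.MustCompile(`^(?:%(?:%|(?:\{[-+]?[QXE](?:,[-+]?[QXE])*\})?\[(?:req\.hdr\([0-9A-Za-z-]+\)|ssl_c_der)(?:,(?:lower|base64))*\])|[^%[:cntrl:]])+$`)

func main() {
	for _, v := range []string{
		"%[req.hdr(X-target),lower]", // allowed fetcher plus converter
		"%{+Q}[ssl_c_der,base64]",    // quoted-output variant
		"plain-value",                // no dynamic parts at all
		"%[env(HOME)]",               // env is not an allowed fetcher
	} {
		fmt.Printf("%-30s %v\n", v, headerValueRE.MatchString(v))
	}
}
```

As the CRD text notes, only the req.hdr and ssl_c_der fetchers and the lower and base64 converters pass; the final case above is rejected for that reason, not for its syntax.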
format: int32 maximum: 4096 minimum: 480 @@ -827,8 +970,19 @@ spec: - Local type: string type: object + ibm: + description: "ibm provides configuration settings that are specific to IBM Cloud load balancers. \n If empty, defaults will be applied. See specific ibm fields for details about their defaults." + properties: + protocol: + description: "protocol specifies whether the load balancer uses PROXY protocol to forward connections to the IngressController. See \"service.kubernetes.io/ibm-load-balancer-cloud-provider-enable-features: \"proxy-protocol\"\" at https://cloud.ibm.com/docs/containers?topic=containers-vpc-lbaas\" \n PROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol. \n Valid values for protocol are TCP, PROXY and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is TCP, without the proxy protocol enabled." + enum: + - "" + - TCP + - PROXY + type: string + type: object type: - description: type is the underlying infrastructure provider for the load balancer. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Nutanix", "OpenStack", and "VSphere". + description: type is the underlying infrastructure provider for the load balancer. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "IBM", "Nutanix", "OpenStack", and "VSphere". enum: - AWS - Azure diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_insights-operator_00-insightsoperator.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_insights-operator_00-insightsoperator.crd.yaml index 32e024555..caa93347b 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_50_insights-operator_00-insightsoperator.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_insights-operator_00-insightsoperator.crd.yaml @@ -66,7 +66,7 @@ spec: - Trace - TraceAll unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. 
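A sketch of the new IBM Cloud providerParameters added above, assuming an IngressController published through a load balancer service (values are illustrative, not from this patch):

spec:
  endpointPublishingStrategy:
    type: LoadBalancerService
    loadBalancer:
      scope: External
      providerParameters:
        type: IBM
        ibm:
          protocol: PROXY   # TCP or "" leaves the platform default (currently TCP)

Note the caveat in the description: enabling PROXY breaks connections unless the load balancer itself forwards with PROXY protocol.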
type: object nullable: true x-kubernetes-preserve-unknown-fields: true diff --git a/vendor/github.com/openshift/api/operator/v1/0000_50_service-ca-operator_02_crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_50_service-ca-operator_02_crd.yaml index 3c7a67d61..8ceeb70cd 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_50_service-ca-operator_02_crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_50_service-ca-operator_02_crd.yaml @@ -66,7 +66,7 @@ spec: - Trace - TraceAll unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. type: object nullable: true x-kubernetes-preserve-unknown-fields: true diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01.crd.yaml index 8d6d83713..0d944cbcd 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_70_cluster-network-operator_01.crd.yaml @@ -156,7 +156,7 @@ spec: description: enablePortPoolsPrepopulation when true will make Kuryr prepopulate each newly created port pool with a minimum number of ports. Kuryr uses Neutron port pooling to fight the fact that it takes a significant amount of time to create one. It creates a number of ports when the first pod that is configured to use the dedicated network for pods is created in a namespace, and keeps them ready to be attached to pods. Port prepopulation is disabled by default. type: boolean mtu: - description: mtu is the MTU that Kuryr should use when creating pod networks in Neutron. The value has to be lower or equal to the MTU of the nodes network and Neutron has to allow creation of tenant networks with such MTU. If unset Pod networks will be created with the same MTU as the nodes network has. + description: mtu is the MTU that Kuryr should use when creating pod networks in Neutron. The value has to be lower or equal to the MTU of the nodes network and Neutron has to allow creation of tenant networks with such MTU. If unset Pod networks will be created with the same MTU as the nodes network has. This also affects the services network created by cluster-network-operator. type: integer format: int32 minimum: 0 @@ -216,6 +216,9 @@ spec: description: gatewayConfig holds the configuration for node gateway options. type: object properties: + ipForwarding: + description: IPForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex). By default this is set to Restricted, and Kubernetes related traffic is still forwarded appropriately, but other IP traffic will not be routed by the OCP node. If there is a desire to allow the host to forward traffic across OVN-Kubernetes managed interfaces, then set this field to "Global". 
The supported values are "Restricted" and "Global". + type: string routingViaHost: description: RoutingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port into the host before sending it out. If this is not set, traffic will always egress directly from OVN to outside without touching the host stack. Setting this to true means hardware offload will not be supported. Default is false if GatewayConfig is specified. type: boolean @@ -268,6 +271,12 @@ spec: format: int32 default: 50 minimum: 1 + maxLogFiles: + description: maxLogFiles specifies the maximum number of ACL_audit log files that can be present. + type: integer + format: int32 + default: 5 + minimum: 1 rateLimit: description: rateLimit is the approximate maximum number of messages to generate per-second per-node. If unset the default of 20 msg/sec is used. type: integer @@ -445,7 +454,7 @@ spec: items: type: string unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. type: object nullable: true x-kubernetes-preserve-unknown-fields: true diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00.crd.yaml index cdad2cfa1..92f24714a 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_70_dns-operator_00.crd.yaml @@ -112,6 +112,12 @@ spec: - RoundRobin - Sequential type: string + protocolStrategy: + description: protocolStrategy specifies the protocol to use for upstream DNS requests. Valid values for protocolStrategy are "TCP" and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is to use the protocol of the original client request. "TCP" specifies that the platform should use TCP for all upstream DNS requests, even if the client request uses UDP. "TCP" is useful for UDP-specific issues such as those created by non-compliant upstream resolvers, but may consume more bandwidth or increase DNS response time. Note that protocolStrategy only affects the protocol of DNS requests that CoreDNS makes to upstream resolvers. It does not affect the protocol of DNS requests between clients and CoreDNS. + enum: + - TCP + - "" + type: string transportConfig: description: "transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use when forwarding DNS requests to an upstream resolver. \n The default value is \"\" (empty) which results in a standard cleartext connection being used when forwarding DNS requests to an upstream resolver." properties: @@ -172,6 +178,12 @@ spec: - RoundRobin - Sequential type: string + protocolStrategy: + description: protocolStrategy specifies the protocol to use for upstream DNS requests. 
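Before the DNS changes continue below, a hypothetical Network operator config illustrating the two OVN-Kubernetes knobs added above, gatewayConfig.ipForwarding and policyAuditConfig.maxLogFiles (values are assumptions):

apiVersion: operator.openshift.io/v1
kind: Network
metadata:
  name: cluster
spec:
  defaultNetwork:
    type: OVNKubernetes
    ovnKubernetesConfig:
      gatewayConfig:
        ipForwarding: Global     # default Restricted forwards only Kubernetes-related traffic
      policyAuditConfig:
        maxLogFiles: 5           # matches the schema default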
Valid values for protocolStrategy are "TCP" and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is to use the protocol of the original client request. "TCP" specifies that the platform should use TCP for all upstream DNS requests, even if the client request uses UDP. "TCP" is useful for UDP-specific issues such as those created by non-compliant upstream resolvers, but may consume more bandwidth or increase DNS response time. Note that protocolStrategy only affects the protocol of DNS requests that CoreDNS makes to upstream resolvers. It does not affect the protocol of DNS requests between clients and CoreDNS. + enum: + - TCP + - "" + type: string transportConfig: description: "transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use when forwarding DNS requests to an upstream resolver. \n The default value is \"\" (empty) which results in a standard cleartext connection being used when forwarding DNS requests to an upstream resolver." properties: diff --git a/vendor/github.com/openshift/api/operator/v1/0000_80_csi_snapshot_controller_operator_01_crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_80_csi_snapshot_controller_operator_01_crd.yaml index f59319a60..9046bed78 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_80_csi_snapshot_controller_operator_01_crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_80_csi_snapshot_controller_operator_01_crd.yaml @@ -65,7 +65,7 @@ spec: - Trace - TraceAll unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. type: object nullable: true x-kubernetes-preserve-unknown-fields: true diff --git a/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml b/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml index 5c7496bca..d90a13557 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml @@ -45,6 +45,7 @@ spec: - diskplugin.csi.alibabacloud.com - vpc.block.csi.ibm.io - powervs.csi.ibm.com + - secrets-store.csi.k8s.io type: string type: object spec: @@ -53,12 +54,84 @@ spec: driverConfig: description: driverConfig can be used to specify platform specific driver configuration. When omitted, this means no opinion and the platform is left to choose reasonable defaults. These defaults are subject to change over time. properties: + aws: + description: aws is used to configure the AWS CSI driver. + properties: + kmsKeyARN: + description: kmsKeyARN sets the cluster default storage class to encrypt volumes with a user-defined KMS key, rather than the default KMS key used by AWS. 
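The protocolStrategy field described above is available on both upstreamResolvers and the per-zone forwardPlugin. A sketch for the default DNS (the resolver address is an assumption):

apiVersion: operator.openshift.io/v1
kind: DNS
metadata:
  name: default
spec:
  upstreamResolvers:
    protocolStrategy: TCP   # force TCP even when clients query over UDP
    upstreams:
    - type: Network
      address: 192.0.2.10   # assumed upstream resolver
      port: 53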
The value may be either the ARN or Alias ARN of a KMS key. + pattern: ^arn:(aws|aws-cn|aws-us-gov):kms:[a-z0-9-]+:[0-9]{12}:(key|alias)\/.*$ + type: string + type: object + azure: + description: azure is used to configure the Azure CSI driver. + properties: + diskEncryptionSet: + description: diskEncryptionSet sets the cluster default storage class to encrypt volumes with a customer-managed encryption set, rather than the default platform-managed keys. + properties: + name: + description: name is the name of the disk encryption set that will be set on the default storage class. The value should consist of only alphanumeric characters, underscores (_), hyphens, and be at most 80 characters in length. + maxLength: 80 + pattern: ^[a-zA-Z0-9\_-]+$ + type: string + resourceGroup: + description: resourceGroup defines the Azure resource group that contains the disk encryption set. The value should consist of only alphanumeric characters, underscores (_), parentheses, hyphens and periods. The value should not end in a period and be at most 90 characters in length. + maxLength: 90 + pattern: ^[\w\.\-\(\)]*[\w\-\(\)]$ + type: string + subscriptionID: + description: 'subscriptionID defines the Azure subscription that contains the disk encryption set. The value should meet the following conditions: 1. It should be a 128-bit number. 2. It should be 36 characters (32 hexadecimal characters and 4 hyphens) long. 3. It should be displayed in five groups separated by hyphens (-). 4. The first group should be 8 characters long. 5. The second, third, and fourth groups should be 4 characters long. 6. The fifth group should be 12 characters long. An Example SubscriptionID: f2007bbf-f802-4a47-9336-cf7c6b89b378' + maxLength: 36 + pattern: ^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$ + type: string + required: + - name + - resourceGroup + - subscriptionID + type: object + type: object driverType: - description: "driverType indicates type of CSI driver for which the driverConfig is being applied to. \n Valid values are: \n * vSphere \n Allows configuration of vsphere CSI driver topology. \n --- Consumers should treat unknown values as a NO-OP." + description: 'driverType indicates type of CSI driver for which the driverConfig is being applied to. Valid values are: AWS, Azure, GCP, vSphere and omitted. Consumers should treat unknown values as a NO-OP.' enum: - "" + - AWS + - Azure + - GCP - vSphere type: string + gcp: + description: gcp is used to configure the GCP CSI driver. + properties: + kmsKey: + description: kmsKey sets the cluster default storage class to encrypt volumes with customer-supplied encryption keys, rather than the default keys managed by GCP. + properties: + keyRing: + description: keyRing is the name of the KMS Key Ring which the KMS Key belongs to. The value should correspond to an existing KMS key ring and should consist of only alphanumeric characters, hyphens (-) and underscores (_), and be at most 63 characters in length. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z0-9\_-]+$ + type: string + location: + description: location is the GCP location in which the Key Ring exists. The value must match an existing GCP location, or "global". Defaults to global, if not set. + pattern: ^[a-zA-Z0-9\_-]+$ + type: string + name: + description: name is the name of the customer-managed encryption key to be used for disk encryption.
The value should correspond to an existing KMS key and should consist of only alphanumeric characters, hyphens (-) and underscores (_), and be at most 63 characters in length. + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z0-9\_-]+$ + type: string + projectID: + description: projectID is the ID of the Project in which the KMS Key Ring exists. It must be 6 to 30 lowercase letters, digits, or hyphens. It must start with a letter. Trailing hyphens are prohibited. + maxLength: 30 + minLength: 6 + pattern: ^[a-z][a-z0-9-]+[a-z0-9]$ + type: string + required: + - keyRing + - name + - projectID + type: object + type: object vSphere: description: vsphere is used to configure the vsphere CSI driver. properties: @@ -109,7 +182,7 @@ spec: - Removed type: string unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. nullable: true type: object x-kubernetes-preserve-unknown-fields: true diff --git a/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml-patch b/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml-patch index 2f23e95cb..2a02f97f2 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml-patch +++ b/vendor/github.com/openshift/api/operator/v1/0000_90_cluster_csi_driver_01_config.crd.yaml-patch @@ -19,3 +19,4 @@ - diskplugin.csi.alibabacloud.com - vpc.block.csi.ibm.io - powervs.csi.ibm.com + - secrets-store.csi.k8s.io diff --git a/vendor/github.com/openshift/api/operator/v1/0000_70_console-operator.crd.yaml b/vendor/github.com/openshift/api/operator/v1/00_console-operator.crd.yaml similarity index 97% rename from vendor/github.com/openshift/api/operator/v1/0000_70_console-operator.crd.yaml rename to vendor/github.com/openshift/api/operator/v1/00_console-operator.crd.yaml index 5b62106e2..242eecebc 100644 --- a/vendor/github.com/openshift/api/operator/v1/0000_70_console-operator.crd.yaml +++ b/vendor/github.com/openshift/api/operator/v1/00_console-operator.crd.yaml @@ -53,7 +53,20 @@ spec: brand: description: brand is the default branding of the web console which can be overridden by providing the brand field. There is a limited set of specific brand options. This field controls elements of the console such as the logo. Invalid value will prevent a console rollout. type: string - pattern: ^$|^(ocp|origin|okd|dedicated|online|azure)$ + enum: + - openshift + - okd + - online + - ocp + - dedicated + - azure + - OpenShift + - OKD + - Online + - OCP + - Dedicated + - Azure + - ROSA customLogoFile: description: 'customLogoFile replaces the default OpenShift logo in the masthead and about dialog. It is a reference to a ConfigMap in the openshift-config namespace. This can be created with a command like ''oc create configmap custom-logo --from-file=/path/to/file -n openshift-config''. 
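Pulling the new driverConfig union together, a hypothetical ClusterCSIDriver that points the AWS EBS driver at a customer-managed KMS key (the ARN is a placeholder that satisfies the pattern above):

apiVersion: operator.openshift.io/v1
kind: ClusterCSIDriver
metadata:
  name: ebs.csi.aws.com
spec:
  driverConfig:
    driverType: AWS
    aws:
      kmsKeyARN: arn:aws:kms:us-east-1:123456789012:key/11111111-2222-3333-4444-555555555555

The azure.diskEncryptionSet and gcp.kmsKey variants follow the same shape with their respective required fields.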
Image size must be less than 1 MB due to constraints on the ConfigMap size. The ConfigMap key should include a file extension so that the console serves the file with the correct MIME type. Recommended logo specifications: Dimensions: Max height of 68px and max width of 200px SVG format preferred' type: object @@ -356,7 +369,7 @@ spec: description: name is the metadata.name of the referenced secret type: string unsupportedConfigOverrides: - description: 'unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides' + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. type: object nullable: true x-kubernetes-preserve-unknown-fields: true diff --git a/vendor/github.com/openshift/api/operator/v1/stable.console.testsuite.yaml b/vendor/github.com/openshift/api/operator/v1/stable.console.testsuite.yaml index 158c96ad4..065d490e4 100644 --- a/vendor/github.com/openshift/api/operator/v1/stable.console.testsuite.yaml +++ b/vendor/github.com/openshift/api/operator/v1/stable.console.testsuite.yaml @@ -1,6 +1,6 @@ apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this name: "[Stable] Console" -crd: 0000_70_console-operator.crd.yaml +crd: 00_console-operator.crd.yaml tests: onCreate: - name: Should be able to create a minimal Console diff --git a/vendor/github.com/openshift/api/operator/v1/stable.ingresscontroller.testsuite.yaml b/vendor/github.com/openshift/api/operator/v1/stable.ingresscontroller.testsuite.yaml index 01d02ce09..903d8e60c 100644 --- a/vendor/github.com/openshift/api/operator/v1/stable.ingresscontroller.testsuite.yaml +++ b/vendor/github.com/openshift/api/operator/v1/stable.ingresscontroller.testsuite.yaml @@ -13,3 +13,466 @@ tests: kind: IngressController spec: httpEmptyRequestsPolicy: Respond + - name: Should be able to create an IngressController with valid Actions + initial: | + apiVersion: operator.openshift.io/v1 + kind: IngressController + spec: + httpHeaders: + actions: + response: + - name: X-Frame-Options + action: + type: Set + set: + value: DENY + - name: X-Cache-Info + action: + type: Set + set: + value: "not cacheable; meta data too large" + - name: X-XSS-Protection + action: + type: Delete + - name: X-Source + action: + type: Set + set: + value: "%[res.hdr(X-Value),lower]" + request: + - name: Content-Location + action: + type: Set + set: + value: /my-first-blog-post + - name: X-SSL-Client-Cert + action: + type: Set + set: + value: "%{+Q}[ssl_c_der,base64]" + - name: Content-Language + action: + type: Delete + - name: X-Target + action: + type: Set + set: + value: "%[req.hdr(host),lower]" + - name: X-Conditional + action: + type: Set + set: + value: "%[req.hdr(Host)] if foo" + - name: X-Condition + action: + type: Set + set: + value: "%[req.hdr(Host)]\ if\ foo" + expected: | + apiVersion: operator.openshift.io/v1 + kind: IngressController + spec: + httpEmptyRequestsPolicy: Respond + httpHeaders: + actions: + response: + - name: X-Frame-Options + action: + type: Set + set: + 
value: DENY + - name: X-Cache-Info + action: + type: Set + set: + value: "not cacheable; meta data too large" + - name: X-XSS-Protection + action: + type: Delete + - name: X-Source + action: + type: Set + set: + value: "%[res.hdr(X-Value),lower]" + request: + - name: Content-Location + action: + type: Set + set: + value: /my-first-blog-post + - name: X-SSL-Client-Cert + action: + type: Set + set: + value: "%{+Q}[ssl_c_der,base64]" + - name: Content-Language + action: + type: Delete + - name: X-Target + action: + type: Set + set: + value: "%[req.hdr(host),lower]" + - name: X-Conditional + action: + type: Set + set: + value: "%[req.hdr(Host)] if foo" + - name: X-Condition + action: + type: Set + set: + value: "%[req.hdr(Host)]\ if\ foo" + - name: Should not allow to set/delete HSTS header. + initial: | + apiVersion: operator.openshift.io/v1 + kind: IngressController + metadata: + name: default-hsts + namespace: openshift-ingress-operator + spec: + httpHeaders: + actions: + response: + - name: X-Frame-Options + action: + type: Set + set: + value: DENY + - name: Strict-Transport-Security + action: + type: Delete + request: + - name: Content-Location + action: + type: Set + set: + value: /my-first-blog-post + - name: Content-Language + action: + type: Delete + expectedError: "strict-transport-security header may not be modified via header actions" + - name: Should not allow to set/delete Proxy header. + initial: | + apiVersion: operator.openshift.io/v1 + kind: IngressController + metadata: + name: default-hsts + namespace: openshift-ingress-operator + spec: + httpHeaders: + actions: + response: + - name: X-Frame-Options + action: + type: Set + set: + value: DENY + request: + - name: Proxy + action: + type: Set + set: + value: example.xyz + - name: Content-Location + action: + type: Set + set: + value: /my-first-blog-post + - name: Content-Language + action: + type: Delete + expectedError: "proxy header may not be modified via header actions" + - name: Should not allow to set/delete Host header. + initial: | + apiVersion: operator.openshift.io/v1 + kind: IngressController + metadata: + name: default-hsts + namespace: openshift-ingress-operator + spec: + httpHeaders: + actions: + response: + - name: X-Frame-Options + action: + type: Set + set: + value: DENY + request: + - name: Host + action: + type: Set + set: + value: example.xyz + - name: Content-Location + action: + type: Set + set: + value: /my-first-blog-post + - name: Content-Language + action: + type: Delete + expectedError: "host header may not be modified via header actions" + - name: Should not allow to set/delete cookie header. + initial: | + apiVersion: operator.openshift.io/v1 + kind: IngressController + metadata: + name: default-hsts + namespace: openshift-ingress-operator + spec: + httpHeaders: + actions: + response: + - name: X-Frame-Options + action: + type: Set + set: + value: DENY + request: + - name: Cookie + action: + type: Set + set: + value: "PHPSESSID=298zf09hf012fh2; csrftoken=u32t4o3tb3gg43; _gat=1" + - name: Content-Location + action: + type: Set + set: + value: /my-first-blog-post + - name: Content-Language + action: + type: Delete + expectedError: "cookie header may not be modified via header actions" + - name: Should not allow to set/delete set-cookie header. 
+ initial: | + apiVersion: operator.openshift.io/v1 + kind: IngressController + metadata: + name: default-hsts + namespace: openshift-ingress-operator + spec: + httpHeaders: + actions: + response: + - name: Set-Cookie + action: + type: Set + set: + value: "sessionId=e8bb43229de9; Domain=foo.example.com" + request: + - name: Content-Location + action: + type: Set + set: + value: /my-first-blog-post + - name: Content-Language + action: + type: Delete + expectedError: "set-cookie header may not be modified via header actions" + - name: Should not allow to set/delete dynamic headers with unclosed braces. + initial: | + apiVersion: operator.openshift.io/v1 + kind: IngressController + metadata: + name: default-unclosed-braces + namespace: openshift-ingress-operator + spec: + httpHeaders: + actions: + request: + - name: Content-Location + action: + type: Set + set: + value: /my-first-blog-post + - name: Content-Language + action: + type: Delete + - name: expires + action: + type: Set + set: + value: "%[req.hdr(host),lower" + expectedError: "Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are req.hdr, ssl_c_der. Converters allowed are lower, base64." + - name: Should not allow to set dynamic response header values with not allowed sample fetchers. + initial: | + apiVersion: operator.openshift.io/v1 + kind: IngressController + metadata: + name: default-not-allowed-values + namespace: openshift-ingress-operator + spec: + httpHeaders: + actions: + response: + - name: X-Frame-Options + action: + type: Set + set: + value: DENY + - name: X-Target + action: + type: Set + set: + value: "%[req.hdrs(host),lower]" + expectedError: "Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are res.hdr, ssl_c_der. Converters allowed are lower, base64." + - name: Should not allow empty value in response. + initial: | + apiVersion: operator.openshift.io/v1 + kind: IngressController + metadata: + name: default-not-allowed-values + namespace: openshift-ingress-operator + spec: + httpHeaders: + actions: + response: + - name: X-Frame-Options + action: + type: Set + set: + value: + expectedError: 'IngressController.operator.openshift.io "default-not-allowed-values" is invalid: [spec.httpHeaders.actions.response[0].action.set.value: Required value, : Invalid value: "null": some validation rules were not checked because the object was invalid; correct the existing errors to complete validation]' + - name: Should not allow empty value in request. 
+ initial: | + apiVersion: operator.openshift.io/v1 + kind: IngressController + metadata: + name: default-not-allowed-values + namespace: openshift-ingress-operator + spec: + httpHeaders: + actions: + request: + - name: X-Frame-Options + action: + type: Set + set: + value: + expectedError: 'IngressController.operator.openshift.io "default-not-allowed-values" is invalid: [spec.httpHeaders.actions.request[0].action.set.value: Required value, : Invalid value: "null": some validation rules were not checked because the object was invalid; correct the existing errors to complete validation]' + - name: Should not allow to set dynamic response header values with not allowed converters. + initial: | + apiVersion: operator.openshift.io/v1 + kind: IngressController + metadata: + name: default-not-allowed-values + namespace: openshift-ingress-operator + spec: + httpHeaders: + actions: + response: + - name: X-Frame-Options + action: + type: Set + set: + value: DENY + - name: X-Source + action: + type: Set + set: + value: "%{+Q}[ssl_c_der,bogus]" + expectedError: "Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are res.hdr, ssl_c_der. Converters allowed are lower, base64." + - name: Should not allow to set dynamic request header values containing sample fetcher res.hdr. + initial: | + apiVersion: operator.openshift.io/v1 + kind: IngressController + metadata: + name: default-not-allowed-values + namespace: openshift-ingress-operator + spec: + httpHeaders: + actions: + request: + - name: X-Target + action: + type: Set + set: + value: "%[res.hdr(X-Value),lower]" + expectedError: "Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are req.hdr, ssl_c_der. Converters allowed are lower, base64." + - name: Should not allow to set dynamic response headers value containing sample fetcher req.hdr. + initial: | + apiVersion: operator.openshift.io/v1 + kind: IngressController + metadata: + name: default-not-allowed-values + namespace: openshift-ingress-operator + spec: + httpHeaders: + actions: + response: + - name: X-Source + action: + type: Set + set: + value: "%[req.hdr(host),lower]" + expectedError: "Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are res.hdr, ssl_c_der. Converters allowed are lower, base64." + - name: Should not allow to set dynamic request header values with not allowed converters. 
+ initial: | + apiVersion: operator.openshift.io/v1 + kind: IngressController + metadata: + name: default-not-allowed-values + namespace: openshift-ingress-operator + spec: + httpHeaders: + actions: + request: + - name: X-SSL-Client-Cert + action: + type: Set + set: + value: "%{+Q}[ssl_c_der,hello]" + - name: Content-Language + action: + type: Delete + expectedError: "Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are req.hdr, ssl_c_der. Converters allowed are lower, base64." + - name: Should not allow to set dynamic request header values with not allowed sample fetchers. + initial: | + apiVersion: operator.openshift.io/v1 + kind: IngressController + metadata: + name: default-not-allowed-values + namespace: openshift-ingress-operator + spec: + httpHeaders: + actions: + request: + - name: X-SSL-Client-Cert + action: + type: Set + set: + value: "%{+Q}[ssl_c_der1234,base64]" + - name: Content-Language + action: + type: Delete + expectedError: "Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are req.hdr, ssl_c_der. Converters allowed are lower, base64." + - name: Should be required to specify the set field when the discriminant type is Set. + initial: | + apiVersion: operator.openshift.io/v1 + kind: IngressController + spec: + httpHeaders: + actions: + response: + - name: X-Frame-Options + action: + type: Set + expectedError: "set is required when type is Set, and forbidden otherwise" + - name: Should be able to add set field only when discriminant type is Set. 
+ initial: | + apiVersion: operator.openshift.io/v1 + kind: IngressController + metadata: + name: default-not-allowed-values + namespace: openshift-ingress-operator + spec: + httpHeaders: + actions: + response: + - name: X-Frame-Options + action: + set: + value: DENY + expectedError: 'IngressController.operator.openshift.io "default-not-allowed-values" is invalid: [spec.httpHeaders.actions.response[0].action.type: Required value, : Invalid value: "null": some validation rules were not checked because the object was invalid; correct the existing errors to complete validation]' diff --git a/vendor/github.com/openshift/api/operator/v1/stable.storage.testsuite.yaml b/vendor/github.com/openshift/api/operator/v1/stable.storage.testsuite.yaml index 42903f22d..98afa6ea7 100644 --- a/vendor/github.com/openshift/api/operator/v1/stable.storage.testsuite.yaml +++ b/vendor/github.com/openshift/api/operator/v1/stable.storage.testsuite.yaml @@ -14,3 +14,98 @@ tests: spec: logLevel: Normal operatorLogLevel: Normal + - name: Should allow creating Storage with vsphere migration enabled + initial: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: + vsphereStorageDriver: CSIWithMigrationDriver + expected: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: + vsphereStorageDriver: CSIWithMigrationDriver + logLevel: Normal + operatorLogLevel: Normal + - name: Should not allow creating Storage with vsphere migration disabled + initial: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: + vsphereStorageDriver: LegacyDeprecatedInTreeDriver + expectedError: "VSphereStorageDriver can not be set to LegacyDeprecatedInTreeDriver" + onUpdate: + - name: Should allow enabling CSI migration for vSphere + initial: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: {} # No spec is required + updated: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: + vsphereStorageDriver: CSIWithMigrationDriver + expected: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: + vsphereStorageDriver: CSIWithMigrationDriver + logLevel: Normal + operatorLogLevel: Normal + - name: Should not allow disabling CSI migration for vSphere + initial: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: {} # No spec is required + updated: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: + vsphereStorageDriver: LegacyDeprecatedInTreeDriver + expectedError: "VSphereStorageDriver can not be set to LegacyDeprecatedInTreeDriver" + - name: Should not allow changing CSIWithMigrationDriver to LegacyDeprecatedInTreeDriver + initial: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: + vsphereStorageDriver: CSIWithMigrationDriver + updated: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: + vsphereStorageDriver: LegacyDeprecatedInTreeDriver + expectedError: "VSphereStorageDriver can not be set to LegacyDeprecatedInTreeDriver" + - name: Should allow changing CSIWithMigrationDriver to empty string + initial: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: + vsphereStorageDriver: CSIWithMigrationDriver + updated: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: + vsphereStorageDriver: "" + expected: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: + vsphereStorageDriver: "" + logLevel: Normal + operatorLogLevel: Normal + - name: Should allow unsetting VSphereStorageDriver once it is set + initial: | + apiVersion: operator.openshift.io/v1 + kind: Storage +
spec: + vsphereStorageDriver: CSIWithMigrationDriver + updated: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: {} + expected: | + apiVersion: operator.openshift.io/v1 + kind: Storage + spec: + logLevel: Normal + operatorLogLevel: Normal diff --git a/vendor/github.com/openshift/api/operator/v1/types.go b/vendor/github.com/openshift/api/operator/v1/types.go index 5f731593d..8b650f329 100644 --- a/vendor/github.com/openshift/api/operator/v1/types.go +++ b/vendor/github.com/openshift/api/operator/v1/types.go @@ -10,7 +10,10 @@ import ( // Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. // +openshift:compatibility-gen:internal type MyOperatorResource struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` // +kubebuilder:validation:Required @@ -70,11 +73,11 @@ type OperatorSpec struct { // +kubebuilder:default=Normal OperatorLogLevel LogLevel `json:"operatorLogLevel,omitempty"` - // unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override - // it will end up overlaying in the following order: - // 1. hardcoded defaults - // 2. observedConfig - // 3. unsupportedConfigOverrides + // unsupportedConfigOverrides overrides the final configuration that was computed by the operator. + // Red Hat does not support the use of this field. + // Misuse of this field could lead to unexpected behavior or conflict with other configuration options. + // Seek guidance from the Red Hat support before using this field. + // Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. // +optional // +nullable // +kubebuilder:pruning:PreserveUnknownFields diff --git a/vendor/github.com/openshift/api/operator/v1/types_authentication.go b/vendor/github.com/openshift/api/operator/v1/types_authentication.go index 80aa55f39..b2cf95816 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_authentication.go +++ b/vendor/github.com/openshift/api/operator/v1/types_authentication.go @@ -13,7 +13,10 @@ import ( // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type Authentication struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // +kubebuilder:validation:Required @@ -51,6 +54,9 @@ type OAuthAPIServerStatus struct { // +openshift:compatibility-gen:level=1 type AuthenticationList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
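Mirroring the storage test suite above, a minimal sketch of opting into vSphere CSI migration (the cluster-scoped singleton is assumed to be named cluster, as is conventional for operator configs):

apiVersion: operator.openshift.io/v1
kind: Storage
metadata:
  name: cluster
spec:
  vsphereStorageDriver: CSIWithMigrationDriver   # LegacyDeprecatedInTreeDriver is rejected; the field may be unset but not reverted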
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` Items []Authentication `json:"items"` diff --git a/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go b/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go index 8ad336fa2..7f16e18a6 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go +++ b/vendor/github.com/openshift/api/operator/v1/types_cloudcredential.go @@ -13,7 +13,10 @@ import ( // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type CloudCredential struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // +kubebuilder:validation:Required @@ -75,6 +78,9 @@ type CloudCredentialStatus struct { // +openshift:compatibility-gen:level=1 type CloudCredentialList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` Items []CloudCredential `json:"items"` diff --git a/vendor/github.com/openshift/api/operator/v1/types_config.go b/vendor/github.com/openshift/api/operator/v1/types_config.go index 025ff252d..39b011717 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_config.go +++ b/vendor/github.com/openshift/api/operator/v1/types_config.go @@ -14,7 +14,10 @@ import ( // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type Config struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` // spec is the specification of the desired behavior of the Config Operator. @@ -43,6 +46,9 @@ type ConfigStatus struct { // +openshift:compatibility-gen:level=1 type ConfigList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` // Items contains the items diff --git a/vendor/github.com/openshift/api/operator/v1/types_console.go b/vendor/github.com/openshift/api/operator/v1/types_console.go index f43a35648..15d653ae4 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_console.go +++ b/vendor/github.com/openshift/api/operator/v1/types_console.go @@ -15,7 +15,10 @@ import ( // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type Console struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // +kubebuilder:validation:Required @@ -92,6 +95,7 @@ type ConsoleCustomization struct { // providing the brand field. There is a limited set of specific brand options. // This field controls elements of the console such as the logo. // Invalid value will prevent a console rollout. + // +kubebuilder:validation:Enum:=openshift;okd;online;ocp;dedicated;azure;OpenShift;OKD;Online;OCP;Dedicated;Azure;ROSA Brand Brand `json:"brand,omitempty"` // documentationBaseURL links to external documentation are shown in various sections // of the web console. Providing documentationBaseURL will override the default @@ -335,22 +339,35 @@ type PinnedResourceReference struct { } // Brand is a specific supported brand within the console. -// +kubebuilder:validation:Pattern=`^$|^(ocp|origin|okd|dedicated|online|azure)$` type Brand string const ( + // Legacy branding for OpenShift + BrandOpenShiftLegacy Brand = "openshift" + // Legacy branding for The Origin Community Distribution of Kubernetes + BrandOKDLegacy Brand = "okd" + // Legacy branding for OpenShift Online + BrandOnlineLegacy Brand = "online" + // Legacy branding for OpenShift Container Platform + BrandOCPLegacy Brand = "ocp" + // Legacy branding for OpenShift Dedicated + BrandDedicatedLegacy Brand = "dedicated" + // Legacy branding for Azure Red Hat OpenShift + BrandAzureLegacy Brand = "azure" // Branding for OpenShift - BrandOpenShift Brand = "openshift" + BrandOpenShift Brand = "OpenShift" // Branding for The Origin Community Distribution of Kubernetes - BrandOKD Brand = "okd" + BrandOKD Brand = "OKD" // Branding for OpenShift Online - BrandOnline Brand = "online" + BrandOnline Brand = "Online" // Branding for OpenShift Container Platform - BrandOCP Brand = "ocp" + BrandOCP Brand = "OCP" // Branding for OpenShift Dedicated - BrandDedicated Brand = "dedicated" + BrandDedicated Brand = "Dedicated" // Branding for Azure Red Hat OpenShift - BrandAzure Brand = "azure" + BrandAzure Brand = "Azure" + // Branding for Red Hat OpenShift Service on AWS + BrandROSA Brand = "ROSA" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -359,6 +376,9 @@ const ( // +openshift:compatibility-gen:level=1 type ConsoleList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` Items []Console `json:"items"` diff --git a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go index b29534015..d2f058910 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go +++ b/vendor/github.com/openshift/api/operator/v1/types_csi_cluster_driver.go @@ -24,7 +24,10 @@ import ( // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type ClusterCSIDriver struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration @@ -80,6 +83,7 @@ const ( AlibabaDiskCSIDriver CSIDriverName = "diskplugin.csi.alibabacloud.com" IBMVPCBlockCSIDriver CSIDriverName = "vpc.block.csi.ibm.io" IBMPowerVSBlockCSIDriver CSIDriverName = "powervs.csi.ibm.com" + SecretsStoreCSIDriver CSIDriverName = "secrets-store.csi.k8s.io" ) // ClusterCSIDriverSpec is the desired behavior of CSI driver operator @@ -105,10 +109,13 @@ type ClusterCSIDriverSpec struct { } // CSIDriverType indicates type of CSI driver being configured. -// +kubebuilder:validation:Enum="";vSphere +// +kubebuilder:validation:Enum="";AWS;Azure;GCP;vSphere type CSIDriverType string const ( + AWSDriverType CSIDriverType = "AWS" + AzureDriverType CSIDriverType = "Azure" + GCPDriverType CSIDriverType = "GCP" VSphereDriverType CSIDriverType = "vSphere" ) @@ -118,25 +125,129 @@ const ( type CSIDriverConfigSpec struct { // driverType indicates type of CSI driver for which the // driverConfig is being applied to. - // - // Valid values are: - // - // * vSphere - // - // Allows configuration of vsphere CSI driver topology. - // - // --- + // Valid values are: AWS, Azure, GCP, vSphere and omitted. // Consumers should treat unknown values as a NO-OP. - // // +kubebuilder:validation:Required // +unionDiscriminator DriverType CSIDriverType `json:"driverType"` + // aws is used to configure the AWS CSI driver. + // +optional + AWS *AWSCSIDriverConfigSpec `json:"aws,omitempty"` + + // azure is used to configure the Azure CSI driver. + // +optional + Azure *AzureCSIDriverConfigSpec `json:"azure,omitempty"` + + // gcp is used to configure the GCP CSI driver. + // +optional + GCP *GCPCSIDriverConfigSpec `json:"gcp,omitempty"` + // vsphere is used to configure the vsphere CSI driver. // +optional VSphere *VSphereCSIDriverConfigSpec `json:"vSphere,omitempty"` } +// AWSCSIDriverConfigSpec defines properties that can be configured for the AWS CSI driver. +type AWSCSIDriverConfigSpec struct { + // kmsKeyARN sets the cluster default storage class to encrypt volumes with a user-defined KMS key, + // rather than the default KMS key used by AWS. + // The value may be either the ARN or Alias ARN of a KMS key. + // +kubebuilder:validation:Pattern:=`^arn:(aws|aws-cn|aws-us-gov):kms:[a-z0-9-]+:[0-9]{12}:(key|alias)\/.*$` + // +optional + KMSKeyARN string `json:"kmsKeyARN,omitempty"` +} + +// AzureDiskEncryptionSet defines the configuration for a disk encryption set. +type AzureDiskEncryptionSet struct { + // subscriptionID defines the Azure subscription that contains the disk encryption set. + // The value should meet the following conditions: + // 1. It should be a 128-bit number. + // 2. It should be 36 characters (32 hexadecimal characters and 4 hyphens) long. + // 3. It should be displayed in five groups separated by hyphens (-). + // 4. The first group should be 8 characters long. + // 5. The second, third, and fourth groups should be 4 characters long. + // 6. The fifth group should be 12 characters long. 
+ // An Example SubscriptionID: f2007bbf-f802-4a47-9336-cf7c6b89b378 + // +kubebuilder:validation:Required + // +kubebuilder:validation:MaxLength:=36 + // +kubebuilder:validation:Pattern:=`^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$` + SubscriptionID string `json:"subscriptionID"` + + // resourceGroup defines the Azure resource group that contains the disk encryption set. + // The value should consist of only alphanumeric characters, + // underscores (_), parentheses, hyphens and periods. + // The value should not end in a period and be at most 90 characters in + // length. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MaxLength:=90 + // +kubebuilder:validation:Pattern:=`^[\w\.\-\(\)]*[\w\-\(\)]$` + ResourceGroup string `json:"resourceGroup"` + + // name is the name of the disk encryption set that will be set on the default storage class. + // The value should consist of only alphanumeric characters, + // underscores (_), hyphens, and be at most 80 characters in length. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MaxLength:=80 + // +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9\_-]+$` + Name string `json:"name"` +} + +// AzureCSIDriverConfigSpec defines properties that can be configured for the Azure CSI driver. +type AzureCSIDriverConfigSpec struct { + // diskEncryptionSet sets the cluster default storage class to encrypt volumes with a + // customer-managed encryption set, rather than the default platform-managed keys. + // +optional + DiskEncryptionSet *AzureDiskEncryptionSet `json:"diskEncryptionSet,omitempty"` +} + +// GCPKMSKeyReference gathers required fields for looking up a GCP KMS Key +type GCPKMSKeyReference struct { + // name is the name of the customer-managed encryption key to be used for disk encryption. + // The value should correspond to an existing KMS key and should + // consist of only alphanumeric characters, hyphens (-) and underscores (_), + // and be at most 63 characters in length. + // +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9\_-]+$` + // +kubebuilder:validation:MinLength:=1 + // +kubebuilder:validation:MaxLength:=63 + // +kubebuilder:validation:Required + Name string `json:"name"` + + // keyRing is the name of the KMS Key Ring which the KMS Key belongs to. + // The value should correspond to an existing KMS key ring and should + // consist of only alphanumeric characters, hyphens (-) and underscores (_), + // and be at most 63 characters in length. + // +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9\_-]+$` + // +kubebuilder:validation:MinLength:=1 + // +kubebuilder:validation:MaxLength:=63 + // +kubebuilder:validation:Required + KeyRing string `json:"keyRing"` + + // projectID is the ID of the Project in which the KMS Key Ring exists. + // It must be 6 to 30 lowercase letters, digits, or hyphens. + // It must start with a letter. Trailing hyphens are prohibited. + // +kubebuilder:validation:Pattern:=`^[a-z][a-z0-9-]+[a-z0-9]$` + // +kubebuilder:validation:MinLength:=6 + // +kubebuilder:validation:MaxLength:=30 + // +kubebuilder:validation:Required + ProjectID string `json:"projectID"` + + // location is the GCP location in which the Key Ring exists. + // The value must match an existing GCP location, or "global". + // Defaults to global, if not set. + // +kubebuilder:validation:Pattern:=`^[a-zA-Z0-9\_-]+$` + // +optional + Location string `json:"location,omitempty"` +} + +// GCPCSIDriverConfigSpec defines properties that can be configured for the GCP CSI driver.
+type GCPCSIDriverConfigSpec struct { + // kmsKey sets the cluster default storage class to encrypt volumes with customer-supplied + // encryption keys, rather than the default keys managed by GCP. + // +optional + KMSKey *GCPKMSKeyReference `json:"kmsKey,omitempty"` +} + // VSphereCSIDriverConfigSpec defines properties that // can be configured for vsphere CSI driver. type VSphereCSIDriverConfigSpec struct { @@ -163,6 +274,10 @@ type ClusterCSIDriverStatus struct { // +openshift:compatibility-gen:level=1 type ClusterCSIDriverList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty"` - Items []ClusterCSIDriver `json:"items"` + + Items []ClusterCSIDriver `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go b/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go index 21db5df0a..f552711b0 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go +++ b/vendor/github.com/openshift/api/operator/v1/types_csi_snapshot.go @@ -13,7 +13,10 @@ import ( // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type CSISnapshotController struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration @@ -45,6 +48,10 @@ type CSISnapshotControllerStatus struct { // +openshift:compatibility-gen:level=1 type CSISnapshotControllerList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty"` - Items []CSISnapshotController `json:"items"` + + Items []CSISnapshotController `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_dns.go b/vendor/github.com/openshift/api/operator/v1/types_dns.go index ae5f62f3b..55e98d1f8 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_dns.go +++ b/vendor/github.com/openshift/api/operator/v1/types_dns.go @@ -24,7 +24,10 @@ import ( // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type DNS struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // spec is the specification of the desired behavior of the DNS. @@ -287,6 +290,25 @@ type ForwardPlugin struct { // // +optional TransportConfig DNSTransportConfig `json:"transportConfig,omitempty"` + + + // protocolStrategy specifies the protocol to use for upstream DNS + // requests. + // Valid values for protocolStrategy are "TCP" and omitted. + // When omitted, this means no opinion and the platform is left to choose + // a reasonable default, which is subject to change over time. 
+ // The current default is to use the protocol of the original client request. + // "TCP" specifies that the platform should use TCP for all upstream DNS requests, + // even if the client request uses UDP. + // "TCP" is useful for UDP-specific issues such as those created by + // non-compliant upstream resolvers, but may consume more bandwidth or + // increase DNS response time. Note that protocolStrategy only affects + // the protocol of DNS requests that CoreDNS makes to upstream resolvers. + // It does not affect the protocol of DNS requests between clients and + // CoreDNS. + // + // +optional + ProtocolStrategy ProtocolStrategy `json:"protocolStrategy"` } // UpstreamResolvers defines a schema for configuring the CoreDNS forward plugin in the @@ -329,6 +351,24 @@ type UpstreamResolvers struct { // // +optional TransportConfig DNSTransportConfig `json:"transportConfig,omitempty"` + + // protocolStrategy specifies the protocol to use for upstream DNS + // requests. + // Valid values for protocolStrategy are "TCP" and omitted. + // When omitted, this means no opinion and the platform is left to choose + // a reasonable default, which is subject to change over time. + // The current default is to use the protocol of the original client request. + // "TCP" specifies that the platform should use TCP for all upstream DNS requests, + // even if the client request uses UDP. + // "TCP" is useful for UDP-specific issues such as those created by + // non-compliant upstream resolvers, but may consume more bandwidth or + // increase DNS response time. Note that protocolStrategy only affects + // the protocol of DNS requests that CoreDNS makes to upstream resolvers. + // It does not affect the protocol of DNS requests between clients and + // CoreDNS. + // + // +optional + ProtocolStrategy ProtocolStrategy `json:"protocolStrategy"` } // Upstream can either be of type SystemResolvConf, or of type Network. @@ -376,6 +416,23 @@ const ( NetworkResolverType UpstreamType = "Network" ) +// ProtocolStrategy is a preference for the protocol to use for DNS queries. +// + --- +// + When consumers observe an unknown value, they should use the default strategy. +// +kubebuilder:validation:Enum:=TCP;"" +type ProtocolStrategy string + +var ( + // ProtocolStrategyDefault specifies no opinion for DNS protocol. + // If empty, the default behavior of CoreDNS is used. Currently, this means that CoreDNS uses the protocol of the + // originating client request as the upstream protocol. + // Note that the default behavior of CoreDNS is subject to change. + ProtocolStrategyDefault ProtocolStrategy = "" + + // ProtocolStrategyTCP instructs CoreDNS to always use TCP, regardless of the originating client's request protocol. + ProtocolStrategyTCP ProtocolStrategy = "TCP" +) + // DNSNodePlacement describes the node scheduling configuration for DNS pods. type DNSNodePlacement struct { // nodeSelector is the node selector applied to DNS pods. @@ -463,6 +520,9 @@ type DNSStatus struct { // +openshift:compatibility-gen:level=1 type DNSList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
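As a usage sketch of the ProtocolStrategy constants defined above (applicable to both ForwardPlugin and UpstreamResolvers): forcing CoreDNS to use TCP toward its upstreams is a one-field change, and leaving the field empty keeps the default client-protocol behavior. The operatorv1 alias and surrounding program are illustrative assumptions, not part of this patch:

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	// Hypothetical illustration: pin all upstream DNS queries to TCP. Per the
	// doc comment above, client traffic to CoreDNS itself is unaffected.
	resolvers := operatorv1.UpstreamResolvers{
		ProtocolStrategy: operatorv1.ProtocolStrategyTCP,
	}
	fmt.Println(resolvers.ProtocolStrategy) // "TCP"
}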
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty"` Items []DNS `json:"items"` diff --git a/vendor/github.com/openshift/api/operator/v1/types_etcd.go b/vendor/github.com/openshift/api/operator/v1/types_etcd.go index 6cd593ced..f0b525a40 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_etcd.go +++ b/vendor/github.com/openshift/api/operator/v1/types_etcd.go @@ -13,7 +13,10 @@ import ( // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type Etcd struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` // +kubebuilder:validation:Required @@ -39,6 +42,9 @@ type EtcdStatus struct { // +openshift:compatibility-gen:level=1 type EtcdList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` // Items contains the items diff --git a/vendor/github.com/openshift/api/operator/v1/types_ingress.go b/vendor/github.com/openshift/api/operator/v1/types_ingress.go index 0a0b3ca74..3d9f512a9 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_ingress.go +++ b/vendor/github.com/openshift/api/operator/v1/types_ingress.go @@ -32,7 +32,10 @@ import ( // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type IngressController struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // spec is the specification of the desired behavior of the IngressController. @@ -280,18 +283,18 @@ type HTTPCompressionPolicy struct { // // The format should follow the Content-Type definition in RFC 1341: // Content-Type := type "/" subtype *[";" parameter] -// - The type in Content-Type can be one of: -// application, audio, image, message, multipart, text, video, or a custom -// type preceded by "X-" and followed by a token as defined below. -// - The token is a string of at least one character, and not containing white -// space, control characters, or any of the characters in the tspecials set. -// - The tspecials set contains the characters ()<>@,;:\"/[]?.= -// - The subtype in Content-Type is also a token. -// - The optional parameter/s following the subtype are defined as: -// token "=" (token / quoted-string) -// - The quoted-string, as defined in RFC 822, is surrounded by double quotes -// and can contain white space plus any character EXCEPT \, ", and CR. -// It can also contain any single ASCII character as long as it is escaped by \. +// - The type in Content-Type can be one of: +// application, audio, image, message, multipart, text, video, or a custom +// type preceded by "X-" and followed by a token as defined below. 
+// - The token is a string of at least one character, and not containing white +// space, control characters, or any of the characters in the tspecials set. +// - The tspecials set contains the characters ()<>@,;:\"/[]?.= +// - The subtype in Content-Type is also a token. +// - The optional parameter/s following the subtype are defined as: +// token "=" (token / quoted-string) +// - The quoted-string, as defined in RFC 822, is surrounded by double quotes +// and can contain white space plus any character EXCEPT \, ", and CR. +// It can also contain any single ASCII character as long as it is escaped by \. // // +kubebuilder:validation:Pattern=`^(?i)(x-[^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+|application|audio|image|message|multipart|text|video)/[^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+(; *[^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+=([^][ ()\\<>@,;:"/?.=\x00-\x1F\x7F]+|"(\\[\x00-\x7F]|[^\x0D"\\])*"))*$` type CompressionMIMEType string @@ -448,7 +451,7 @@ const ( // +union type ProviderLoadBalancerParameters struct { // type is the underlying infrastructure provider for the load balancer. - // Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Nutanix", + // Allowed values are "AWS", "Azure", "BareMetal", "GCP", "IBM", "Nutanix", // "OpenStack", and "VSphere". // // +unionDiscriminator @@ -473,10 +476,19 @@ type ProviderLoadBalancerParameters struct { // // +optional GCP *GCPLoadBalancerParameters `json:"gcp,omitempty"` + + // ibm provides configuration settings that are specific to IBM Cloud + // load balancers. + // + // If empty, defaults will be applied. See specific ibm fields for + // details about their defaults. + // + // +optional + IBM *IBMLoadBalancerParameters `json:"ibm,omitempty"` } // LoadBalancerProviderType is the underlying infrastructure provider for the -// load balancer. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "Nutanix", +// load balancer. Allowed values are "AWS", "Azure", "BareMetal", "GCP", "IBM", "Nutanix", // "OpenStack", and "VSphere". // // +kubebuilder:validation:Enum=AWS;Azure;BareMetal;GCP;Nutanix;OpenStack;VSphere;IBM @@ -573,6 +585,33 @@ const ( GCPLocalAccess GCPClientAccess = "Local" ) +// IBMLoadBalancerParameters provides configuration settings that are +// specific to IBM Cloud load balancers. +type IBMLoadBalancerParameters struct { + // protocol specifies whether the load balancer uses PROXY protocol to forward connections to + // the IngressController. See "service.kubernetes.io/ibm-load-balancer-cloud-provider-enable-features: + // "proxy-protocol"" at https://cloud.ibm.com/docs/containers?topic=containers-vpc-lbaas" + // + // PROXY protocol can be used with load balancers that support it to + // communicate the source addresses of client connections when + // forwarding those connections to the IngressController. Using PROXY + // protocol enables the IngressController to report those source + // addresses instead of reporting the load balancer's address in HTTP + // headers and logs. Note that enabling PROXY protocol on the + // IngressController will cause connections to fail if you are not using + // a load balancer that uses PROXY protocol to forward connections to + // the IngressController. See + // http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for + // information about PROXY protocol. + // + // Valid values for protocol are TCP, PROXY and omitted. + // When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. 
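A minimal sketch of selecting the IBM provider with PROXY protocol, as described above; the operatorv1 alias is an assumption, and explicit string casts are used rather than assuming exported constant names:

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	// Hypothetical illustration: configure the IngressController's IBM Cloud
	// load balancer to forward connections using PROXY protocol.
	lb := operatorv1.ProviderLoadBalancerParameters{
		Type: operatorv1.LoadBalancerProviderType("IBM"),
		IBM: &operatorv1.IBMLoadBalancerParameters{
			Protocol: operatorv1.IngressControllerProtocol("PROXY"),
		},
	}
	fmt.Println(lb.Type, lb.IBM.Protocol)
}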
+ // The current default is TCP, without the proxy protocol enabled. + // + // +optional + Protocol IngressControllerProtocol `json:"protocol,omitempty"` +} + // AWSClassicLoadBalancerParameters holds configuration parameters for an // AWS Classic load balancer. type AWSClassicLoadBalancerParameters struct { @@ -983,14 +1022,16 @@ type SyslogLoggingDestinationParameters struct { // +optional Facility string `json:"facility,omitempty"` - // maxLength is the maximum length of the syslog message + // maxLength is the maximum length of the log message. // - // If this field is empty, the maxLength is set to "1024". + // Valid values are integers in the range 480 to 4096, inclusive. + // + // When omitted, the default value is 1024. // - // +kubebuilder:validation:Optional // +kubebuilder:validation:Maximum=4096 // +kubebuilder:validation:Minimum=480 // +kubebuilder:default=1024 + // +default:=1024 // +optional MaxLength uint32 `json:"maxLength,omitempty"` } @@ -998,6 +1039,18 @@ type SyslogLoggingDestinationParameters struct { // ContainerLoggingDestinationParameters describes parameters for the Container // logging destination type. type ContainerLoggingDestinationParameters struct { + // maxLength is the maximum length of the log message. + // + // Valid values are integers in the range 480 to 8192, inclusive. + // + // When omitted, the default value is 1024. + // + // +kubebuilder:validation:Maximum=8192 + // +kubebuilder:validation:Minimum=480 + // +kubebuilder:default=1024 + // +default:=1024 + // +optional + MaxLength int32 `json:"maxLength,omitempty"` } // LoggingDestination describes a destination for log messages. @@ -1346,6 +1399,144 @@ type IngressControllerHTTPHeaders struct { // +nullable // +optional HeaderNameCaseAdjustments []IngressControllerHTTPHeaderNameCaseAdjustment `json:"headerNameCaseAdjustments,omitempty"` + + // actions specifies options for modifying headers and their values. + // Note that this option only applies to cleartext HTTP connections + // and to secure HTTP connections for which the ingress controller + // terminates encryption (that is, edge-terminated or reencrypt + // connections). Headers cannot be modified for TLS passthrough + // connections. + // Setting the HSTS (`Strict-Transport-Security`) header is not supported via actions. `Strict-Transport-Security` + // may only be configured using the "haproxy.router.openshift.io/hsts_header" route annotation, and only in + // accordance with the policy specified in Ingress.Spec.RequiredHSTSPolicies. + // Any actions defined here are applied after any actions related to the following other fields: + // cache-control, spec.clientTLS, + // spec.httpHeaders.forwardedHeaderPolicy, spec.httpHeaders.uniqueId, + // and spec.httpHeaders.headerNameCaseAdjustments. + // In case of HTTP request headers, the actions specified in spec.httpHeaders.actions on the Route will be executed after + // the actions specified in the IngressController's spec.httpHeaders.actions field. + // In case of HTTP response headers, the actions specified in spec.httpHeaders.actions on the IngressController will be + // executed after the actions specified in the Route's spec.httpHeaders.actions field. + // Headers set using this API cannot be captured for use in access logs. + // The following header names are reserved and may not be modified via this API: + // Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie. 
+	// Note that the total size of all net added headers *after* interpolating dynamic values
+	// must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the
+	// IngressController. Please refer to the documentation
+	// for that API field for more details.
+	// +optional
+	Actions IngressControllerHTTPHeaderActions `json:"actions,omitempty"`
+}
+
+// IngressControllerHTTPHeaderActions defines configuration for actions on HTTP request and response headers.
+type IngressControllerHTTPHeaderActions struct {
+	// response is a list of HTTP response headers to modify.
+	// Actions defined here will modify the response headers of all requests passing through an ingress controller.
+	// These actions are applied to all Routes, i.e. for all connections handled by the ingress controller defined within a cluster.
+	// IngressController actions for response headers will be executed after Route actions.
+	// Currently, actions may either `Set` or `Delete` header values.
+	// Actions are applied in sequence as defined in this list.
+	// A maximum of 20 response header actions may be configured.
+	// Sample fetchers allowed are "res.hdr" and "ssl_c_der".
+	// Converters allowed are "lower" and "base64".
+	// Example header values: "%[res.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]".
+	// +listType=map
+	// +listMapKey=name
+	// +optional
+	// +kubebuilder:validation:MaxItems=20
+	// +kubebuilder:validation:XValidation:rule=`self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:res\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$')))`,message="Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are res.hdr, ssl_c_der. Converters allowed are lower, base64."
+	Response []IngressControllerHTTPHeader `json:"response"`
+	// request is a list of HTTP request headers to modify.
+	// Actions defined here will modify the request headers of all requests passing through an ingress controller.
+	// These actions are applied to all Routes, i.e. for all connections handled by the ingress controller defined within a cluster.
+	// IngressController actions for request headers will be executed before Route actions.
+	// Currently, actions may either `Set` or `Delete` header values.
+	// Actions are applied in sequence as defined in this list.
+	// A maximum of 20 request header actions may be configured.
+	// Sample fetchers allowed are "req.hdr" and "ssl_c_der".
+	// Converters allowed are "lower" and "base64".
+	// Example header values: "%[req.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]".
+	// + ---
+	// + Note: Any change to regex mentioned below must be reflected in the CRD validation of route in https://github.com/openshift/library-go/blob/master/pkg/route/validation/validation.go and vice-versa.
+ // +listType=map + // +listMapKey=name + // +optional + // +kubebuilder:validation:MaxItems=20 + // +kubebuilder:validation:XValidation:rule=`self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:req\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$')))`,message="Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are req.hdr, ssl_c_der. Converters allowed are lower, base64." + Request []IngressControllerHTTPHeader `json:"request"` +} + +// IngressControllerHTTPHeader specifies configuration for setting or deleting an HTTP header. +type IngressControllerHTTPHeader struct { + // name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header + // name as defined in RFC 2616 section 4.2. + // The name must consist only of alphanumeric and the following special characters, "-!#$%&'*+.^_`". + // The following header names are reserved and may not be modified via this API: + // Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie. + // It must be no more than 255 characters in length. + // Header name must be unique. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=255 + // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" + // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'strict-transport-security'",message="strict-transport-security header may not be modified via header actions" + // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'proxy'",message="proxy header may not be modified via header actions" + // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'host'",message="host header may not be modified via header actions" + // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'cookie'",message="cookie header may not be modified via header actions" + // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'set-cookie'",message="set-cookie header may not be modified via header actions" + Name string `json:"name"` + // action specifies actions to perform on headers, such as setting or deleting headers. + // +kubebuilder:validation:Required + Action IngressControllerHTTPHeaderActionUnion `json:"action"` +} + +// IngressControllerHTTPHeaderActionUnion specifies an action to take on an HTTP header. +// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Set' ? has(self.set) : !has(self.set)",message="set is required when type is Set, and forbidden otherwise" +// +union +type IngressControllerHTTPHeaderActionUnion struct { + // type defines the type of the action to be applied on the header. + // Possible values are Set or Delete. + // Set allows you to set HTTP request and response headers. + // Delete allows you to delete HTTP request and response headers. 
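To make the Set/Delete union concrete, a small sketch using the types in this hunk (the Set member and the action constants are defined just below; the operatorv1 alias and the surrounding program are illustrative assumptions):

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	// Hypothetical illustration: one Set action on response headers and one
	// Delete action on request headers, observing the validation rules above
	// (unique, non-reserved names; at most 20 actions per list).
	actions := operatorv1.IngressControllerHTTPHeaderActions{
		Response: []operatorv1.IngressControllerHTTPHeader{{
			Name: "X-Frame-Options",
			Action: operatorv1.IngressControllerHTTPHeaderActionUnion{
				Type: operatorv1.Set,
				Set:  &operatorv1.IngressControllerSetHTTPHeader{Value: "DENY"},
			},
		}},
		Request: []operatorv1.IngressControllerHTTPHeader{{
			Name: "X-Debug-Token",
			Action: operatorv1.IngressControllerHTTPHeaderActionUnion{
				Type: operatorv1.Delete,
			},
		}},
	}
	fmt.Println(len(actions.Response), len(actions.Request))
}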
+ // +unionDiscriminator + // +kubebuilder:validation:Enum:=Set;Delete + // +kubebuilder:validation:Required + Type IngressControllerHTTPHeaderActionType `json:"type"` + + // set specifies how the HTTP header should be set. + // This field is required when type is Set and forbidden otherwise. + // +optional + // +unionMember + Set *IngressControllerSetHTTPHeader `json:"set,omitempty"` +} + +// IngressControllerHTTPHeaderActionType defines actions that can be performed on HTTP headers. +type IngressControllerHTTPHeaderActionType string + +const ( + // Set specifies that an HTTP header should be set. + Set IngressControllerHTTPHeaderActionType = "Set" + // Delete specifies that an HTTP header should be deleted. + Delete IngressControllerHTTPHeaderActionType = "Delete" +) + +// IngressControllerSetHTTPHeader defines the value which needs to be set on an HTTP header. +type IngressControllerSetHTTPHeader struct { + // value specifies a header value. + // Dynamic values can be added. The value will be interpreted as an HAProxy format string as defined in + // http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and + // otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. + // The value of this field must be no more than 16384 characters in length. + // Note that the total size of all net added headers *after* interpolating dynamic values + // must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the + // IngressController. + // + --- + // + Note: This limit was selected as most common web servers have a limit of 16384 characters or some lower limit. + // + See . + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=16384 + Value string `json:"value"` } // IngressControllerTuningOptions specifies options for tuning the performance @@ -1681,6 +1872,10 @@ type IngressControllerStatus struct { // +openshift:compatibility-gen:level=1 type IngressControllerList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty"` - Items []IngressController `json:"items"` + + Items []IngressController `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_insights.go b/vendor/github.com/openshift/api/operator/v1/types_insights.go index dd80c2239..b193c3853 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_insights.go +++ b/vendor/github.com/openshift/api/operator/v1/types_insights.go @@ -13,7 +13,10 @@ import ( // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type InsightsOperator struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` // spec is the specification of the desired behavior of the Insights. @@ -139,6 +142,10 @@ type GathererStatus struct { // +openshift:compatibility-gen:level=1 type InsightsOperatorList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` - Items []InsightsOperator `json:"items"` + + Items []InsightsOperator `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go index 42797fce3..d43c8d088 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go +++ b/vendor/github.com/openshift/api/operator/v1/types_kubeapiserver.go @@ -14,7 +14,10 @@ import ( // +openshift:compatibility-gen:level=1 // +openshift:compatibility-gen:level=1 type KubeAPIServer struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` // spec is the specification of the desired behavior of the Kubernetes API Server @@ -65,6 +68,9 @@ type ServiceAccountIssuerStatus struct { // +openshift:compatibility-gen:level=1 type KubeAPIServerList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` // Items contains the items diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go index e07d26f17..4a41d3fbb 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go +++ b/vendor/github.com/openshift/api/operator/v1/types_kubecontrollermanager.go @@ -13,7 +13,10 @@ import ( // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type KubeControllerManager struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` // spec is the specification of the desired behavior of the Kubernetes Controller Manager @@ -50,6 +53,9 @@ type KubeControllerManagerStatus struct { // +openshift:compatibility-gen:level=1 type KubeControllerManagerList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` // Items contains the items diff --git a/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go b/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go index b187efc83..c556eee77 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go +++ b/vendor/github.com/openshift/api/operator/v1/types_kubestorageversionmigrator.go @@ -13,7 +13,10 @@ import ( // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type KubeStorageVersionMigrator struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` // +kubebuilder:validation:Required @@ -39,6 +42,9 @@ type KubeStorageVersionMigratorStatus struct { // +openshift:compatibility-gen:level=1 type KubeStorageVersionMigratorList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` // Items contains the items diff --git a/vendor/github.com/openshift/api/operator/v1/types_network.go b/vendor/github.com/openshift/api/operator/v1/types_network.go index 29d795318..ffa8e0636 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_network.go +++ b/vendor/github.com/openshift/api/operator/v1/types_network.go @@ -15,7 +15,10 @@ import ( // +k8s:openapi-gen=true // +openshift:compatibility-gen:level=1 type Network struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` Spec NetworkSpec `json:"spec,omitempty"` @@ -36,8 +39,12 @@ type NetworkStatus struct { // +openshift:compatibility-gen:level=1 type NetworkList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty"` - Items []Network `json:"items"` + + Items []Network `json:"items"` } // NetworkSpec is the top-level network configuration object. @@ -404,7 +411,8 @@ type KuryrConfig struct { // mtu is the MTU that Kuryr should use when creating pod networks in Neutron. // The value has to be lower or equal to the MTU of the nodes network and Neutron has // to allow creation of tenant networks with such MTU. If unset Pod networks will be - // created with the same MTU as the nodes network has. + // created with the same MTU as the nodes network has. This also affects the services + // network created by cluster-network-operator. // +kubebuilder:validation:Minimum=0 // +optional MTU *uint32 `json:"mtu,omitempty"` @@ -472,6 +480,17 @@ type HybridOverlayConfig struct { type IPsecConfig struct { } +type IPForwardingMode string + +const ( + // IPForwardingRestricted limits the IP forwarding on OVN-Kube managed interfaces (br-ex, br-ex1) to only required + // service and other k8s related traffic + IPForwardingRestricted IPForwardingMode = "Restricted" + + // IPForwardingGlobal allows all IP traffic to be forwarded across OVN-Kube managed interfaces + IPForwardingGlobal IPForwardingMode = "Global" +) + // GatewayConfig holds node gateway-related parsed config file parameters and command-line overrides type GatewayConfig struct { // RoutingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port @@ -481,6 +500,13 @@ type GatewayConfig struct { // +kubebuilder:default:=false // +optional RoutingViaHost bool `json:"routingViaHost,omitempty"` + // IPForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex). + // By default this is set to Restricted, and Kubernetes related traffic is still forwarded appropriately, but other + // IP traffic will not be routed by the OCP node. 
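A usage sketch of the IPForwarding mode on GatewayConfig as just described (the operatorv1 alias and surrounding program are illustrative assumptions; the field documentation continues below):

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	// Hypothetical illustration: opt a cluster into global IP forwarding on
	// OVN-Kubernetes managed interfaces; the zero value keeps "Restricted".
	gw := operatorv1.GatewayConfig{
		RoutingViaHost: false,
		IPForwarding:   operatorv1.IPForwardingGlobal,
	}
	fmt.Println(gw.IPForwarding) // "Global"
}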
If there is a desire to allow the host to forward traffic across + // OVN-Kubernetes managed interfaces, then set this field to "Global". + // The supported values are "Restricted" and "Global". + // +optional + IPForwarding IPForwardingMode `json:"ipForwarding,omitempty"` } type ExportNetworkFlows struct { @@ -535,6 +561,12 @@ type PolicyAuditConfig struct { // +optional MaxFileSize *uint32 `json:"maxFileSize,omitempty"` + // maxLogFiles specifies the maximum number of ACL_audit log files that can be present. + // +kubebuilder:default=5 + // +kubebuilder:validation:Minimum=1 + // +optional + MaxLogFiles *int32 `json:"maxLogFiles,omitempty"` + // destination is the location for policy log messages. // Regardless of this config, persistent logs will always be dumped to the host // at /var/log/ovn/ however diff --git a/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go index 5511db364..800605574 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go +++ b/vendor/github.com/openshift/api/operator/v1/types_openshiftapiserver.go @@ -13,7 +13,10 @@ import ( // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type OpenShiftAPIServer struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` // spec is the specification of the desired behavior of the OpenShift API Server. @@ -49,6 +52,9 @@ type OpenShiftAPIServerStatus struct { // +openshift:compatibility-gen:level=1 type OpenShiftAPIServerList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` // Items contains the items diff --git a/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go index 442e40314..fc7cc1086 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go +++ b/vendor/github.com/openshift/api/operator/v1/types_openshiftcontrollermanager.go @@ -13,7 +13,10 @@ import ( // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type OpenShiftControllerManager struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` // +kubebuilder:validation:Required @@ -39,6 +42,9 @@ type OpenShiftControllerManagerStatus struct { // +openshift:compatibility-gen:level=1 type OpenShiftControllerManagerList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` // Items contains the items diff --git a/vendor/github.com/openshift/api/operator/v1/types_scheduler.go b/vendor/github.com/openshift/api/operator/v1/types_scheduler.go index 654f0d612..346dc8a8c 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_scheduler.go +++ b/vendor/github.com/openshift/api/operator/v1/types_scheduler.go @@ -13,7 +13,10 @@ import ( // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type KubeScheduler struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` // spec is the specification of the desired behavior of the Kubernetes Scheduler @@ -42,6 +45,9 @@ type KubeSchedulerStatus struct { // +openshift:compatibility-gen:level=1 type KubeSchedulerList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` // Items contains the items diff --git a/vendor/github.com/openshift/api/operator/v1/types_serviceca.go b/vendor/github.com/openshift/api/operator/v1/types_serviceca.go index a7404c4f2..e7967b41e 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_serviceca.go +++ b/vendor/github.com/openshift/api/operator/v1/types_serviceca.go @@ -13,7 +13,10 @@ import ( // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type ServiceCA struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` //spec holds user settable values for configuration @@ -41,6 +44,9 @@ type ServiceCAStatus struct { // +openshift:compatibility-gen:level=1 type ServiceCAList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` // Items contains the items diff --git a/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go index 2d96e0240..006b8bb99 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go +++ b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogapiserver.go @@ -14,7 +14,10 @@ import ( // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type ServiceCatalogAPIServer struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // +kubebuilder:validation:Required @@ -41,6 +44,9 @@ type ServiceCatalogAPIServerStatus struct { // +openshift:compatibility-gen:level=1 type ServiceCatalogAPIServerList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` // Items contains the items diff --git a/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go index 1317487e6..859965408 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go +++ b/vendor/github.com/openshift/api/operator/v1/types_servicecatalogcontrollermanager.go @@ -14,7 +14,10 @@ import ( // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type ServiceCatalogControllerManager struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata"` // +kubebuilder:validation:Required @@ -41,6 +44,9 @@ type ServiceCatalogControllerManagerStatus struct { // +openshift:compatibility-gen:level=1 type ServiceCatalogControllerManagerList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` // Items contains the items diff --git a/vendor/github.com/openshift/api/operator/v1/types_storage.go b/vendor/github.com/openshift/api/operator/v1/types_storage.go index 38ffe26d5..96a4b5d88 100644 --- a/vendor/github.com/openshift/api/operator/v1/types_storage.go +++ b/vendor/github.com/openshift/api/operator/v1/types_storage.go @@ -13,7 +13,10 @@ import ( // Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). // +openshift:compatibility-gen:level=1 type Storage struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration @@ -26,9 +29,28 @@ type Storage struct { Status StorageStatus `json:"status"` } +// StorageDriverType indicates whether CSI migration should be enabled for drivers where it is optional. +// +kubebuilder:validation:Enum="";LegacyDeprecatedInTreeDriver;CSIWithMigrationDriver +type StorageDriverType string + +const ( + LegacyDeprecatedInTreeDriver StorageDriverType = "LegacyDeprecatedInTreeDriver" + CSIWithMigrationDriver StorageDriverType = "CSIWithMigrationDriver" +) + // StorageSpec is the specification of the desired behavior of the cluster storage operator. type StorageSpec struct { OperatorSpec `json:",inline"` + + // VSphereStorageDriver indicates the storage driver to use on VSphere clusters. 
+ // Once this field is set to CSIWithMigrationDriver, it can not be changed. + // If this is empty, the platform will choose a good default, + // which may change over time without notice. + // The current default is CSIWithMigrationDriver and may not be changed. + // DEPRECATED: This field will be removed in a future release. + // +kubebuilder:validation:XValidation:rule="self != \"LegacyDeprecatedInTreeDriver\"",message="VSphereStorageDriver can not be set to LegacyDeprecatedInTreeDriver" + // +optional + VSphereStorageDriver StorageDriverType `json:"vsphereStorageDriver"` } // StorageStatus defines the observed status of the cluster storage operator. @@ -45,6 +67,10 @@ type StorageStatus struct { // +openshift:compatibility-gen:level=1 type StorageList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata,omitempty"` - Items []Storage `json:"items"` + + Items []Storage `json:"items"` } diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go index 5304a3901..3f47cd441 100644 --- a/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.deepcopy.go @@ -13,6 +13,22 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSCSIDriverConfigSpec) DeepCopyInto(out *AWSCSIDriverConfigSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSCSIDriverConfigSpec. +func (in *AWSCSIDriverConfigSpec) DeepCopy() *AWSCSIDriverConfigSpec { + if in == nil { + return nil + } + out := new(AWSCSIDriverConfigSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AWSClassicLoadBalancerParameters) DeepCopyInto(out *AWSClassicLoadBalancerParameters) { *out = *in @@ -233,9 +249,61 @@ func (in *AuthenticationStatus) DeepCopy() *AuthenticationStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureCSIDriverConfigSpec) DeepCopyInto(out *AzureCSIDriverConfigSpec) { + *out = *in + if in.DiskEncryptionSet != nil { + in, out := &in.DiskEncryptionSet, &out.DiskEncryptionSet + *out = new(AzureDiskEncryptionSet) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureCSIDriverConfigSpec. +func (in *AzureCSIDriverConfigSpec) DeepCopy() *AzureCSIDriverConfigSpec { + if in == nil { + return nil + } + out := new(AzureCSIDriverConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureDiskEncryptionSet) DeepCopyInto(out *AzureDiskEncryptionSet) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDiskEncryptionSet. 
+func (in *AzureDiskEncryptionSet) DeepCopy() *AzureDiskEncryptionSet { + if in == nil { + return nil + } + out := new(AzureDiskEncryptionSet) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CSIDriverConfigSpec) DeepCopyInto(out *CSIDriverConfigSpec) { *out = *in + if in.AWS != nil { + in, out := &in.AWS, &out.AWS + *out = new(AWSCSIDriverConfigSpec) + **out = **in + } + if in.Azure != nil { + in, out := &in.Azure, &out.Azure + *out = new(AzureCSIDriverConfigSpec) + (*in).DeepCopyInto(*out) + } + if in.GCP != nil { + in, out := &in.GCP, &out.GCP + *out = new(GCPCSIDriverConfigSpec) + (*in).DeepCopyInto(*out) + } if in.VSphere != nil { in, out := &in.VSphere, &out.VSphere *out = new(VSphereCSIDriverConfigSpec) @@ -1409,6 +1477,43 @@ func (in *ForwardPlugin) DeepCopy() *ForwardPlugin { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPCSIDriverConfigSpec) DeepCopyInto(out *GCPCSIDriverConfigSpec) { + *out = *in + if in.KMSKey != nil { + in, out := &in.KMSKey, &out.KMSKey + *out = new(GCPKMSKeyReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPCSIDriverConfigSpec. +func (in *GCPCSIDriverConfigSpec) DeepCopy() *GCPCSIDriverConfigSpec { + if in == nil { + return nil + } + out := new(GCPCSIDriverConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GCPKMSKeyReference) DeepCopyInto(out *GCPKMSKeyReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCPKMSKeyReference. +func (in *GCPKMSKeyReference) DeepCopy() *GCPKMSKeyReference { + if in == nil { + return nil + } + out := new(GCPKMSKeyReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GCPLoadBalancerParameters) DeepCopyInto(out *GCPLoadBalancerParameters) { *out = *in @@ -1585,6 +1690,22 @@ func (in *HybridOverlayConfig) DeepCopy() *HybridOverlayConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IBMLoadBalancerParameters) DeepCopyInto(out *IBMLoadBalancerParameters) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IBMLoadBalancerParameters. +func (in *IBMLoadBalancerParameters) DeepCopy() *IBMLoadBalancerParameters { + if in == nil { + return nil + } + out := new(IBMLoadBalancerParameters) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IPAMConfig) DeepCopyInto(out *IPAMConfig) { *out = *in @@ -1746,6 +1867,74 @@ func (in *IngressControllerCaptureHTTPHeaders) DeepCopy() *IngressControllerCapt return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressControllerHTTPHeader) DeepCopyInto(out *IngressControllerHTTPHeader) { + *out = *in + in.Action.DeepCopyInto(&out.Action) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerHTTPHeader. +func (in *IngressControllerHTTPHeader) DeepCopy() *IngressControllerHTTPHeader { + if in == nil { + return nil + } + out := new(IngressControllerHTTPHeader) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerHTTPHeaderActionUnion) DeepCopyInto(out *IngressControllerHTTPHeaderActionUnion) { + *out = *in + if in.Set != nil { + in, out := &in.Set, &out.Set + *out = new(IngressControllerSetHTTPHeader) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerHTTPHeaderActionUnion. +func (in *IngressControllerHTTPHeaderActionUnion) DeepCopy() *IngressControllerHTTPHeaderActionUnion { + if in == nil { + return nil + } + out := new(IngressControllerHTTPHeaderActionUnion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerHTTPHeaderActions) DeepCopyInto(out *IngressControllerHTTPHeaderActions) { + *out = *in + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = make([]IngressControllerHTTPHeader, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = make([]IngressControllerHTTPHeader, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerHTTPHeaderActions. +func (in *IngressControllerHTTPHeaderActions) DeepCopy() *IngressControllerHTTPHeaderActions { + if in == nil { + return nil + } + out := new(IngressControllerHTTPHeaderActions) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IngressControllerHTTPHeaders) DeepCopyInto(out *IngressControllerHTTPHeaders) { *out = *in @@ -1755,6 +1944,7 @@ func (in *IngressControllerHTTPHeaders) DeepCopyInto(out *IngressControllerHTTPH *out = make([]IngressControllerHTTPHeaderNameCaseAdjustment, len(*in)) copy(*out, *in) } + in.Actions.DeepCopyInto(&out.Actions) return } @@ -1838,6 +2028,22 @@ func (in *IngressControllerLogging) DeepCopy() *IngressControllerLogging { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressControllerSetHTTPHeader) DeepCopyInto(out *IngressControllerSetHTTPHeader) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressControllerSetHTTPHeader. +func (in *IngressControllerSetHTTPHeader) DeepCopy() *IngressControllerSetHTTPHeader { + if in == nil { + return nil + } + out := new(IngressControllerSetHTTPHeader) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *IngressControllerSpec) DeepCopyInto(out *IngressControllerSpec) {
	*out = *in
@@ -3391,6 +3597,11 @@ func (in *PolicyAuditConfig) DeepCopyInto(out *PolicyAuditConfig) {
 		*out = new(uint32)
 		**out = **in
 	}
+	if in.MaxLogFiles != nil {
+		in, out := &in.MaxLogFiles, &out.MaxLogFiles
+		*out = new(int32)
+		**out = **in
+	}
 	return
 }
@@ -3454,6 +3665,11 @@ func (in *ProviderLoadBalancerParameters) DeepCopyInto(out *ProviderLoadBalancer
 		*out = new(GCPLoadBalancerParameters)
 		**out = **in
 	}
+	if in.IBM != nil {
+		in, out := &in.IBM, &out.IBM
+		*out = new(IBMLoadBalancerParameters)
+		**out = **in
+	}
 	return
 }
diff --git a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go
index 50ecdf372..d10bbd51f 100644
--- a/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go
+++ b/vendor/github.com/openshift/api/operator/v1/zz_generated.swagger_doc_generated.go
@@ -26,7 +26,8 @@ func (GenerationStatus) SwaggerDoc() map[string]string {
 }
 
 var map_MyOperatorResource = map[string]string{
-	"": "MyOperatorResource is an example operator configuration type\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+	"":         "MyOperatorResource is an example operator configuration type\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
 }
 
 func (MyOperatorResource) SwaggerDoc() map[string]string {
@@ -63,7 +64,7 @@ var map_OperatorSpec = map[string]string{
 	"managementState": "managementState indicates whether and how the operator should manage the component",
 	"logLevel": "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands.\n\nValid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\".",
 	"operatorLogLevel": "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves.\n\nValid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\".",
-	"unsupportedConfigOverrides": "unsupportedConfigOverrides holds a sparse config that will override any previously set options. It only needs to be the fields to override it will end up overlaying in the following order: 1. hardcoded defaults 2. observedConfig 3. unsupportedConfigOverrides",
+	"unsupportedConfigOverrides": "unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from Red Hat support before using this field. Use of this property blocks cluster upgrades; it must be removed before upgrading your cluster.",
 	"observedConfig": "observedConfig holds a sparse config that controller has observed from the cluster state.
It exists in spec because it is an input to the level for the operator", } @@ -106,7 +107,8 @@ func (StaticPodOperatorStatus) SwaggerDoc() map[string]string { } var map_Authentication = map[string]string{ - "": "Authentication provides information to configure an operator to manage authentication.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "Authentication provides information to configure an operator to manage authentication.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (Authentication) SwaggerDoc() map[string]string { @@ -114,7 +116,8 @@ func (Authentication) SwaggerDoc() map[string]string { } var map_AuthenticationList = map[string]string{ - "": "AuthenticationList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "AuthenticationList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (AuthenticationList) SwaggerDoc() map[string]string { @@ -138,7 +141,8 @@ func (OAuthAPIServerStatus) SwaggerDoc() map[string]string { } var map_CloudCredential = map[string]string{ - "": "CloudCredential provides a means to configure an operator to manage CredentialsRequests.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "CloudCredential provides a means to configure an operator to manage CredentialsRequests.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (CloudCredential) SwaggerDoc() map[string]string { @@ -146,7 +150,8 @@ func (CloudCredential) SwaggerDoc() map[string]string { } var map_CloudCredentialList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (CloudCredentialList) SwaggerDoc() map[string]string { @@ -171,9 +176,10 @@ func (CloudCredentialStatus) SwaggerDoc() map[string]string { } var map_Config = map[string]string{ - "": "Config specifies the behavior of the config operator which is responsible for creating the initial configuration of other components on the cluster. 
The operator also handles installation, migration or synchronization of cloud configurations for AWS and Azure cloud based clusters\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec is the specification of the desired behavior of the Config Operator.", - "status": "status defines the observed status of the Config Operator.", + "": "Config specifies the behavior of the config operator which is responsible for creating the initial configuration of other components on the cluster. The operator also handles installation, migration or synchronization of cloud configurations for AWS and Azure cloud based clusters\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the Config Operator.", + "status": "status defines the observed status of the Config Operator.", } func (Config) SwaggerDoc() map[string]string { @@ -181,8 +187,9 @@ func (Config) SwaggerDoc() map[string]string { } var map_ConfigList = map[string]string{ - "": "ConfigList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "items": "Items contains the items", + "": "ConfigList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items contains the items", } func (ConfigList) SwaggerDoc() map[string]string { @@ -199,7 +206,8 @@ func (AddPage) SwaggerDoc() map[string]string { } var map_Console = map[string]string{ - "": "Console provides a means to configure an operator to manage the console.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "Console provides a means to configure an operator to manage the console.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (Console) SwaggerDoc() map[string]string { @@ -234,7 +242,8 @@ func (ConsoleCustomization) SwaggerDoc() map[string]string { } var map_ConsoleList = map[string]string{ - "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
 }
 
 func (ConsoleList) SwaggerDoc() map[string]string {
@@ -380,9 +389,41 @@ func (StatuspageProvider) SwaggerDoc() map[string]string {
 	return map_StatuspageProvider
 }
 
+var map_AWSCSIDriverConfigSpec = map[string]string{
+	"": "AWSCSIDriverConfigSpec defines properties that can be configured for the AWS CSI driver.",
+	"kmsKeyARN": "kmsKeyARN sets the cluster default storage class to encrypt volumes with a user-defined KMS key, rather than the default KMS key used by AWS. The value may be either the ARN or Alias ARN of a KMS key.",
+}
+
+func (AWSCSIDriverConfigSpec) SwaggerDoc() map[string]string {
+	return map_AWSCSIDriverConfigSpec
+}
+
+var map_AzureCSIDriverConfigSpec = map[string]string{
+	"": "AzureCSIDriverConfigSpec defines properties that can be configured for the Azure CSI driver.",
+	"diskEncryptionSet": "diskEncryptionSet sets the cluster default storage class to encrypt volumes with a customer-managed encryption set, rather than the default platform-managed keys.",
+}
+
+func (AzureCSIDriverConfigSpec) SwaggerDoc() map[string]string {
+	return map_AzureCSIDriverConfigSpec
+}
+
+var map_AzureDiskEncryptionSet = map[string]string{
+	"": "AzureDiskEncryptionSet defines the configuration for a disk encryption set.",
+	"subscriptionID": "subscriptionID defines the Azure subscription that contains the disk encryption set. The value should meet the following conditions: 1. It should be a 128-bit number. 2. It should be 36 characters (32 hexadecimal characters and 4 hyphens) long. 3. It should be displayed in five groups separated by hyphens (-). 4. The first group should be 8 characters long. 5. The second, third, and fourth groups should be 4 characters long. 6. The fifth group should be 12 characters long. An example SubscriptionID: f2007bbf-f802-4a47-9336-cf7c6b89b378",
+	"resourceGroup": "resourceGroup defines the Azure resource group that contains the disk encryption set. The value should consist of only alphanumeric characters, underscores (_), parentheses, hyphens and periods. The value should not end in a period and be at most 90 characters in length.",
+	"name": "name is the name of the disk encryption set that will be set on the default storage class. The value should consist of only alphanumeric characters, underscores (_), hyphens, and be at most 80 characters in length.",
+}
+
+func (AzureDiskEncryptionSet) SwaggerDoc() map[string]string {
+	return map_AzureDiskEncryptionSet
+}
+
 var map_CSIDriverConfigSpec = map[string]string{
 	"": "CSIDriverConfigSpec defines configuration spec that can be used to optionally configure a specific CSI Driver.",
-	"driverType": "driverType indicates type of CSI driver for which the driverConfig is being applied to.\n\nValid values are:\n\n* vSphere\n\nAllows configuration of vsphere CSI driver topology.",
+	"driverType": "driverType indicates type of CSI driver for which the driverConfig is being applied to. Valid values are: AWS, Azure, GCP, vSphere and omitted.
Consumers should treat unknown values as a NO-OP.", + "aws": "aws is used to configure the AWS CSI driver.", + "azure": "azure is used to configure the Azure CSI driver.", + "gcp": "gcp is used to configure the GCP CSI driver.", "vSphere": "vsphere is used to configure the vsphere CSI driver.", } @@ -391,9 +432,10 @@ func (CSIDriverConfigSpec) SwaggerDoc() map[string]string { } var map_ClusterCSIDriver = map[string]string{ - "": "ClusterCSIDriver object allows management and configuration of a CSI driver operator installed by default in OpenShift. Name of the object must be name of the CSI driver it operates. See CSIDriverName type for list of allowed values.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. They may not be overridden.", + "": "ClusterCSIDriver object allows management and configuration of a CSI driver operator installed by default in OpenShift. Name of the object must be name of the CSI driver it operates. See CSIDriverName type for list of allowed values.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", } func (ClusterCSIDriver) SwaggerDoc() map[string]string { @@ -401,7 +443,8 @@ func (ClusterCSIDriver) SwaggerDoc() map[string]string { } var map_ClusterCSIDriverList = map[string]string{ - "": "ClusterCSIDriverList contains a list of ClusterCSIDriver\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "ClusterCSIDriverList contains a list of ClusterCSIDriver\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (ClusterCSIDriverList) SwaggerDoc() map[string]string { @@ -426,6 +469,27 @@ func (ClusterCSIDriverStatus) SwaggerDoc() map[string]string { return map_ClusterCSIDriverStatus } +var map_GCPCSIDriverConfigSpec = map[string]string{ + "": "GCPCSIDriverConfigSpec defines properties that can be configured for the GCP CSI driver.", + "kmsKey": "kmsKey sets the cluster default storage class to encrypt volumes with customer-supplied encryption keys, rather than the default keys managed by GCP.", +} + +func (GCPCSIDriverConfigSpec) SwaggerDoc() map[string]string { + return map_GCPCSIDriverConfigSpec +} + +var map_GCPKMSKeyReference = map[string]string{ + "": "GCPKMSKeyReference gathers required fields for looking up a GCP KMS Key", + "name": "name is the name of the customer-managed encryption key to be used for disk encryption. The value should correspond to an existing KMS key and should consist of only alphanumeric characters, hyphens (-) and underscores (_), and be at most 63 characters in length.", + "keyRing": "keyRing is the name of the KMS Key Ring which the KMS Key belongs to. 
The value should correspond to an existing KMS key ring and should consist of only alphanumeric characters, hyphens (-) and underscores (_), and be at most 63 characters in length.", + "projectID": "projectID is the ID of the Project in which the KMS Key Ring exists. It must be 6 to 30 lowercase letters, digits, or hyphens. It must start with a letter. Trailing hyphens are prohibited.", + "location": "location is the GCP location in which the Key Ring exists. The value must match an existing GCP location, or \"global\". Defaults to global, if not set.", +} + +func (GCPKMSKeyReference) SwaggerDoc() map[string]string { + return map_GCPKMSKeyReference +} + var map_VSphereCSIDriverConfigSpec = map[string]string{ "": "VSphereCSIDriverConfigSpec defines properties that can be configured for vsphere CSI driver.", "topologyCategories": "topologyCategories indicates tag categories with which vcenter resources such as hostcluster or datacenter were tagged with. If cluster Infrastructure object has a topology, values specified in Infrastructure object will be used and modifications to topologyCategories will be rejected.", @@ -436,9 +500,10 @@ func (VSphereCSIDriverConfigSpec) SwaggerDoc() map[string]string { } var map_CSISnapshotController = map[string]string{ - "": "CSISnapshotController provides a means to configure an operator to manage the CSI snapshots. `cluster` is the canonical name.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. They may not be overridden.", + "": "CSISnapshotController provides a means to configure an operator to manage the CSI snapshots. `cluster` is the canonical name.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", } func (CSISnapshotController) SwaggerDoc() map[string]string { @@ -446,7 +511,8 @@ func (CSISnapshotController) SwaggerDoc() map[string]string { } var map_CSISnapshotControllerList = map[string]string{ - "": "CSISnapshotControllerList contains a list of CSISnapshotControllers.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "CSISnapshotControllerList contains a list of CSISnapshotControllers.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (CSISnapshotControllerList) SwaggerDoc() map[string]string { @@ -470,9 +536,10 @@ func (CSISnapshotControllerStatus) SwaggerDoc() map[string]string { } var map_DNS = map[string]string{ - "": "DNS manages the CoreDNS component to provide a name resolution service for pods and services in the cluster.\n\nThis supports the DNS-based service discovery specification: https://github.com/kubernetes/dns/blob/master/docs/specification.md\n\nMore details: https://kubernetes.io/docs/tasks/administer-cluster/coredns\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec is the specification of the desired behavior of the DNS.", - "status": "status is the most recently observed status of the DNS.", + "": "DNS manages the CoreDNS component to provide a name resolution service for pods and services in the cluster.\n\nThis supports the DNS-based service discovery specification: https://github.com/kubernetes/dns/blob/master/docs/specification.md\n\nMore details: https://kubernetes.io/docs/tasks/administer-cluster/coredns\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the DNS.", + "status": "status is the most recently observed status of the DNS.", } func (DNS) SwaggerDoc() map[string]string { @@ -490,7 +557,8 @@ func (DNSCache) SwaggerDoc() map[string]string { } var map_DNSList = map[string]string{ - "": "DNSList contains a list of DNS\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "DNSList contains a list of DNS\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (DNSList) SwaggerDoc() map[string]string { @@ -554,10 +622,11 @@ func (DNSTransportConfig) SwaggerDoc() map[string]string { } var map_ForwardPlugin = map[string]string{ - "": "ForwardPlugin defines a schema for configuring the CoreDNS forward plugin.", - "upstreams": "upstreams is a list of resolvers to forward name queries for subdomains of Zones. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. The Upstreams are selected in the order specified in Policy. Each upstream is represented by an IP address or IP:port if the upstream listens on a port other than 53.\n\nA maximum of 15 upstreams is allowed per ForwardPlugin.", - "policy": "policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified:\n\n* \"Random\" picks a random upstream server for each query. * \"RoundRobin\" picks upstream servers in a round-robin order, moving to the next server for each new query. 
* \"Sequential\" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.\n\nThe default value is \"Random\"", - "transportConfig": "transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use when forwarding DNS requests to an upstream resolver.\n\nThe default value is \"\" (empty) which results in a standard cleartext connection being used when forwarding DNS requests to an upstream resolver.", + "": "ForwardPlugin defines a schema for configuring the CoreDNS forward plugin.", + "upstreams": "upstreams is a list of resolvers to forward name queries for subdomains of Zones. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. The Upstreams are selected in the order specified in Policy. Each upstream is represented by an IP address or IP:port if the upstream listens on a port other than 53.\n\nA maximum of 15 upstreams is allowed per ForwardPlugin.", + "policy": "policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified:\n\n* \"Random\" picks a random upstream server for each query. * \"RoundRobin\" picks upstream servers in a round-robin order, moving to the next server for each new query. * \"Sequential\" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.\n\nThe default value is \"Random\"", + "transportConfig": "transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use when forwarding DNS requests to an upstream resolver.\n\nThe default value is \"\" (empty) which results in a standard cleartext connection being used when forwarding DNS requests to an upstream resolver.", + "protocolStrategy": "protocolStrategy specifies the protocol to use for upstream DNS requests. Valid values for protocolStrategy are \"TCP\" and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is to use the protocol of the original client request. \"TCP\" specifies that the platform should use TCP for all upstream DNS requests, even if the client request uses UDP. \"TCP\" is useful for UDP-specific issues such as those created by non-compliant upstream resolvers, but may consume more bandwidth or increase DNS response time. Note that protocolStrategy only affects the protocol of DNS requests that CoreDNS makes to upstream resolvers. It does not affect the protocol of DNS requests between clients and CoreDNS.", } func (ForwardPlugin) SwaggerDoc() map[string]string { @@ -587,10 +656,11 @@ func (Upstream) SwaggerDoc() map[string]string { } var map_UpstreamResolvers = map[string]string{ - "": "UpstreamResolvers defines a schema for configuring the CoreDNS forward plugin in the specific case of the default (\".\") server. It defers from ForwardPlugin in the default values it accepts: * At least one upstream should be specified. * the default policy is Sequential", - "upstreams": "Upstreams is a list of resolvers to forward name queries for the \".\" domain. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. 
The Upstreams are selected in the order specified in Policy.\n\nA maximum of 15 upstreams is allowed per ForwardPlugin. If no Upstreams are specified, /etc/resolv.conf is used by default",
-	"policy": "Policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified:\n\n* \"Random\" picks a random upstream server for each query. * \"RoundRobin\" picks upstream servers in a round-robin order, moving to the next server for each new query. * \"Sequential\" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.\n\nThe default value is \"Sequential\"",
-	"transportConfig": "transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use when forwarding DNS requests to an upstream resolver.\n\nThe default value is \"\" (empty) which results in a standard cleartext connection being used when forwarding DNS requests to an upstream resolver.",
+	"": "UpstreamResolvers defines a schema for configuring the CoreDNS forward plugin in the specific case of the default (\".\") server. It differs from ForwardPlugin in the default values it accepts: * At least one upstream should be specified. * the default policy is Sequential",
+	"upstreams": "Upstreams is a list of resolvers to forward name queries for the \".\" domain. Each instance of CoreDNS performs health checking of Upstreams. When a healthy upstream returns an error during the exchange, another resolver is tried from Upstreams. The Upstreams are selected in the order specified in Policy.\n\nA maximum of 15 upstreams is allowed per ForwardPlugin. If no Upstreams are specified, /etc/resolv.conf is used by default",
+	"policy": "Policy is used to determine the order in which upstream servers are selected for querying. Any one of the following values may be specified:\n\n* \"Random\" picks a random upstream server for each query. * \"RoundRobin\" picks upstream servers in a round-robin order, moving to the next server for each new query. * \"Sequential\" tries querying upstream servers in a sequential order until one responds, starting with the first server for each new query.\n\nThe default value is \"Sequential\"",
+	"transportConfig": "transportConfig is used to configure the transport type, server name, and optional custom CA or CA bundle to use when forwarding DNS requests to an upstream resolver.\n\nThe default value is \"\" (empty) which results in a standard cleartext connection being used when forwarding DNS requests to an upstream resolver.",
+	"protocolStrategy": "protocolStrategy specifies the protocol to use for upstream DNS requests. Valid values for protocolStrategy are \"TCP\" and omitted. When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is to use the protocol of the original client request. \"TCP\" specifies that the platform should use TCP for all upstream DNS requests, even if the client request uses UDP. \"TCP\" is useful for UDP-specific issues such as those created by non-compliant upstream resolvers, but may consume more bandwidth or increase DNS response time. Note that protocolStrategy only affects the protocol of DNS requests that CoreDNS makes to upstream resolvers.
It does not affect the protocol of DNS requests between clients and CoreDNS.",
 }
 
 func (UpstreamResolvers) SwaggerDoc() map[string]string {
@@ -598,7 +668,8 @@ func (UpstreamResolvers) SwaggerDoc() map[string]string {
 }
 
 var map_Etcd = map[string]string{
-	"": "Etcd provides information to configure an operator to manage etcd.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"": "Etcd provides information to configure an operator to manage etcd.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
 }
 
 func (Etcd) SwaggerDoc() map[string]string {
@@ -606,8 +677,9 @@ func (Etcd) SwaggerDoc() map[string]string {
 }
 
 var map_EtcdList = map[string]string{
-	"": "KubeAPISOperatorConfigList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
-	"items": "Items contains the items",
+	"": "EtcdList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"items": "Items contains the items",
 }
 
 func (EtcdList) SwaggerDoc() map[string]string {
@@ -667,7 +739,8 @@ func (ClientTLS) SwaggerDoc() map[string]string {
 }
 
 var map_ContainerLoggingDestinationParameters = map[string]string{
-	"": "ContainerLoggingDestinationParameters describes parameters for the Container logging destination type.",
+	"": "ContainerLoggingDestinationParameters describes parameters for the Container logging destination type.",
+	"maxLength": "maxLength is the maximum length of the log message.\n\nValid values are integers in the range 480 to 8192, inclusive.\n\nWhen omitted, the default value is 1024.",
 }
 
 func (ContainerLoggingDestinationParameters) SwaggerDoc() map[string]string {
@@ -717,10 +790,20 @@ func (HostNetworkStrategy) SwaggerDoc() map[string]string {
 	return map_HostNetworkStrategy
 }
 
+var map_IBMLoadBalancerParameters = map[string]string{
+	"": "IBMLoadBalancerParameters provides configuration settings that are specific to IBM Cloud load balancers.",
+	"protocol": "protocol specifies whether the load balancer uses PROXY protocol to forward connections to the IngressController. See \"service.kubernetes.io/ibm-load-balancer-cloud-provider-enable-features: \"proxy-protocol\"\" at https://cloud.ibm.com/docs/containers?topic=containers-vpc-lbaas\n\nPROXY protocol can be used with load balancers that support it to communicate the source addresses of client connections when forwarding those connections to the IngressController. Using PROXY protocol enables the IngressController to report those source addresses instead of reporting the load balancer's address in HTTP headers and logs. Note that enabling PROXY protocol on the IngressController will cause connections to fail if you are not using a load balancer that uses PROXY protocol to forward connections to the IngressController. See http://www.haproxy.org/download/2.2/doc/proxy-protocol.txt for information about PROXY protocol.\n\nValid values for protocol are TCP, PROXY and omitted.
When omitted, this means no opinion and the platform is left to choose a reasonable default, which is subject to change over time. The current default is TCP, without the proxy protocol enabled.", +} + +func (IBMLoadBalancerParameters) SwaggerDoc() map[string]string { + return map_IBMLoadBalancerParameters +} + var map_IngressController = map[string]string{ - "": "IngressController describes a managed ingress controller for the cluster. The controller can service OpenShift Route and Kubernetes Ingress resources.\n\nWhen an IngressController is created, a new ingress controller deployment is created to allow external traffic to reach the services that expose Ingress or Route resources. Updating this resource may lead to disruption for public facing network connections as a new ingress controller revision may be rolled out.\n\nhttps://kubernetes.io/docs/concepts/services-networking/ingress-controllers\n\nWhenever possible, sensible defaults for the platform are used. See each field for more details.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec is the specification of the desired behavior of the IngressController.", - "status": "status is the most recently observed status of the IngressController.", + "": "IngressController describes a managed ingress controller for the cluster. The controller can service OpenShift Route and Kubernetes Ingress resources.\n\nWhen an IngressController is created, a new ingress controller deployment is created to allow external traffic to reach the services that expose Ingress or Route resources. Updating this resource may lead to disruption for public facing network connections as a new ingress controller revision may be rolled out.\n\nhttps://kubernetes.io/docs/concepts/services-networking/ingress-controllers\n\nWhenever possible, sensible defaults for the platform are used. See each field for more details.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the IngressController.", + "status": "status is the most recently observed status of the IngressController.", } func (IngressController) SwaggerDoc() map[string]string { @@ -767,11 +850,42 @@ func (IngressControllerCaptureHTTPHeaders) SwaggerDoc() map[string]string { return map_IngressControllerCaptureHTTPHeaders } +var map_IngressControllerHTTPHeader = map[string]string{ + "": "IngressControllerHTTPHeader specifies configuration for setting or deleting an HTTP header.", + "name": "name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. The name must consist only of alphanumeric and the following special characters, \"-!#$%&'*+.^_`\". The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie. It must be no more than 255 characters in length. 
Header name must be unique.", + "action": "action specifies actions to perform on headers, such as setting or deleting headers.", +} + +func (IngressControllerHTTPHeader) SwaggerDoc() map[string]string { + return map_IngressControllerHTTPHeader +} + +var map_IngressControllerHTTPHeaderActionUnion = map[string]string{ + "": "IngressControllerHTTPHeaderActionUnion specifies an action to take on an HTTP header.", + "type": "type defines the type of the action to be applied on the header. Possible values are Set or Delete. Set allows you to set HTTP request and response headers. Delete allows you to delete HTTP request and response headers.", + "set": "set specifies how the HTTP header should be set. This field is required when type is Set and forbidden otherwise.", +} + +func (IngressControllerHTTPHeaderActionUnion) SwaggerDoc() map[string]string { + return map_IngressControllerHTTPHeaderActionUnion +} + +var map_IngressControllerHTTPHeaderActions = map[string]string{ + "": "IngressControllerHTTPHeaderActions defines configuration for actions on HTTP request and response headers.", + "response": "response is a list of HTTP response headers to modify. Actions defined here will modify the response headers of all requests passing through an ingress controller. These actions are applied to all Routes i.e. for all connections handled by the ingress controller defined within a cluster. IngressController actions for response headers will be executed after Route actions. Currently, actions may define to either `Set` or `Delete` headers values. Actions are applied in sequence as defined in this list. A maximum of 20 response header actions may be configured. Sample fetchers allowed are \"res.hdr\" and \"ssl_c_der\". Converters allowed are \"lower\" and \"base64\". Example header values: \"%[res.hdr(X-target),lower]\", \"%{+Q}[ssl_c_der,base64]\".", + "request": "request is a list of HTTP request headers to modify. Actions defined here will modify the request headers of all requests passing through an ingress controller. These actions are applied to all Routes i.e. for all connections handled by the ingress controller defined within a cluster. IngressController actions for request headers will be executed before Route actions. Currently, actions may define to either `Set` or `Delete` headers values. Actions are applied in sequence as defined in this list. A maximum of 20 request header actions may be configured. Sample fetchers allowed are \"req.hdr\" and \"ssl_c_der\". Converters allowed are \"lower\" and \"base64\". Example header values: \"%[req.hdr(X-target),lower]\", \"%{+Q}[ssl_c_der,base64]\". ", +} + +func (IngressControllerHTTPHeaderActions) SwaggerDoc() map[string]string { + return map_IngressControllerHTTPHeaderActions +} + var map_IngressControllerHTTPHeaders = map[string]string{ "": "IngressControllerHTTPHeaders specifies how the IngressController handles certain HTTP headers.", "forwardedHeaderPolicy": "forwardedHeaderPolicy specifies when and how the IngressController sets the Forwarded, X-Forwarded-For, X-Forwarded-Host, X-Forwarded-Port, X-Forwarded-Proto, and X-Forwarded-Proto-Version HTTP headers. 
The value may be one of the following:\n\n* \"Append\", which specifies that the IngressController appends the\n headers, preserving existing headers.\n\n* \"Replace\", which specifies that the IngressController sets the\n headers, replacing any existing Forwarded or X-Forwarded-* headers.\n\n* \"IfNone\", which specifies that the IngressController sets the\n headers if they are not already set.\n\n* \"Never\", which specifies that the IngressController never sets the\n headers, preserving any existing headers.\n\nBy default, the policy is \"Append\".", "uniqueId": "uniqueId describes configuration for a custom HTTP header that the ingress controller should inject into incoming HTTP requests. Typically, this header is configured to have a value that is unique to the HTTP request. The header can be used by applications or included in access logs to facilitate tracing individual HTTP requests.\n\nIf this field is empty, no such header is injected into requests.", "headerNameCaseAdjustments": "headerNameCaseAdjustments specifies case adjustments that can be applied to HTTP header names. Each adjustment is specified as an HTTP header name with the desired capitalization. For example, specifying \"X-Forwarded-For\" indicates that the \"x-forwarded-for\" HTTP header should be adjusted to have the specified capitalization.\n\nThese adjustments are only applied to cleartext, edge-terminated, and re-encrypt routes, and only when using HTTP/1.\n\nFor request headers, these adjustments are applied only for routes that have the haproxy.router.openshift.io/h1-adjust-case=true annotation. For response headers, these adjustments are applied to all HTTP responses.\n\nIf this field is empty, no request headers are adjusted.", + "actions": "actions specifies options for modifying headers and their values. Note that this option only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). Headers cannot be modified for TLS passthrough connections. Setting the HSTS (`Strict-Transport-Security`) header is not supported via actions. `Strict-Transport-Security` may only be configured using the \"haproxy.router.openshift.io/hsts_header\" route annotation, and only in accordance with the policy specified in Ingress.Spec.RequiredHSTSPolicies. Any actions defined here are applied after any actions related to the following other fields: cache-control, spec.clientTLS, spec.httpHeaders.forwardedHeaderPolicy, spec.httpHeaders.uniqueId, and spec.httpHeaders.headerNameCaseAdjustments. In case of HTTP request headers, the actions specified in spec.httpHeaders.actions on the Route will be executed after the actions specified in the IngressController's spec.httpHeaders.actions field. In case of HTTP response headers, the actions specified in spec.httpHeaders.actions on the IngressController will be executed after the actions specified in the Route's spec.httpHeaders.actions field. Headers set using this API cannot be captured for use in access logs. The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Host, Cookie, Set-Cookie. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. 
Please refer to the documentation for that API field for more details.", } func (IngressControllerHTTPHeaders) SwaggerDoc() map[string]string { @@ -789,7 +903,8 @@ func (IngressControllerHTTPUniqueIdHeaderPolicy) SwaggerDoc() map[string]string } var map_IngressControllerList = map[string]string{ - "": "IngressControllerList contains a list of IngressControllers.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "IngressControllerList contains a list of IngressControllers.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (IngressControllerList) SwaggerDoc() map[string]string { @@ -805,6 +920,15 @@ func (IngressControllerLogging) SwaggerDoc() map[string]string { return map_IngressControllerLogging } +var map_IngressControllerSetHTTPHeader = map[string]string{ + "": "IngressControllerSetHTTPHeader defines the value which needs to be set on an HTTP header.", + "value": "value specifies a header value. Dynamic values can be added. The value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. The value of this field must be no more than 16384 characters in length. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. ", +} + +func (IngressControllerSetHTTPHeader) SwaggerDoc() map[string]string { + return map_IngressControllerSetHTTPHeader +} + var map_IngressControllerSpec = map[string]string{ "": "IngressControllerSpec is the specification of the desired behavior of the IngressController.", "domain": "domain is a DNS name serviced by the ingress controller and is used to configure multiple features:\n\n* For the LoadBalancerService endpoint publishing strategy, domain is\n used to configure DNS records. See endpointPublishingStrategy.\n\n* When using a generated default certificate, the certificate will be valid\n for domain and its subdomains. See defaultCertificate.\n\n* The value is published to individual Route statuses so that end-users\n know where to target external DNS records.\n\ndomain must be unique among all IngressControllers, and cannot be updated.\n\nIf empty, defaults to ingress.config.openshift.io/cluster .spec.domain.", @@ -920,9 +1044,10 @@ func (PrivateStrategy) SwaggerDoc() map[string]string { var map_ProviderLoadBalancerParameters = map[string]string{ "": "ProviderLoadBalancerParameters holds desired load balancer information specific to the underlying infrastructure provider.", - "type": "type is the underlying infrastructure provider for the load balancer. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"Nutanix\", \"OpenStack\", and \"VSphere\".", + "type": "type is the underlying infrastructure provider for the load balancer. Allowed values are \"AWS\", \"Azure\", \"BareMetal\", \"GCP\", \"IBM\", \"Nutanix\", \"OpenStack\", and \"VSphere\".", "aws": "aws provides configuration settings that are specific to AWS load balancers.\n\nIf empty, defaults will be applied. 
See specific aws fields for details about their defaults.", "gcp": "gcp provides configuration settings that are specific to GCP load balancers.\n\nIf empty, defaults will be applied. See specific gcp fields for details about their defaults.", + "ibm": "ibm provides configuration settings that are specific to IBM Cloud load balancers.\n\nIf empty, defaults will be applied. See specific ibm fields for details about their defaults.", } func (ProviderLoadBalancerParameters) SwaggerDoc() map[string]string { @@ -944,7 +1069,7 @@ var map_SyslogLoggingDestinationParameters = map[string]string{ "address": "address is the IP address of the syslog endpoint that receives log messages.", "port": "port is the UDP port number of the syslog endpoint that receives log messages.", "facility": "facility specifies the syslog facility of log messages.\n\nIf this field is empty, the facility is \"local1\".", - "maxLength": "maxLength is the maximum length of the syslog message\n\nIf this field is empty, the maxLength is set to \"1024\".", + "maxLength": "maxLength is the maximum length of the log message.\n\nValid values are integers in the range 480 to 4096, inclusive.\n\nWhen omitted, the default value is 1024.", } func (SyslogLoggingDestinationParameters) SwaggerDoc() map[string]string { @@ -986,9 +1111,10 @@ func (HealthCheck) SwaggerDoc() map[string]string { } var map_InsightsOperator = map[string]string{ - "": "\n\nInsightsOperator holds cluster-wide information about the Insights Operator.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec is the specification of the desired behavior of the Insights.", - "status": "status is the most recently observed status of the Insights operator.", + "": "\n\nInsightsOperator holds cluster-wide information about the Insights Operator.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the Insights.", + "status": "status is the most recently observed status of the Insights operator.", } func (InsightsOperator) SwaggerDoc() map[string]string { @@ -996,7 +1122,8 @@ func (InsightsOperator) SwaggerDoc() map[string]string { } var map_InsightsOperatorList = map[string]string{ - "": "InsightsOperatorList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "InsightsOperatorList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (InsightsOperatorList) SwaggerDoc() map[string]string { @@ -1023,9 +1150,10 @@ func (InsightsReport) SwaggerDoc() map[string]string { } var map_KubeAPIServer = map[string]string{ - "": "KubeAPIServer provides information to configure an operator to manage kube-apiserver.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec is the specification of the desired behavior of the Kubernetes API Server", - "status": "status is the most recently observed status of the Kubernetes API Server", + "": "KubeAPIServer provides information to configure an operator to manage kube-apiserver.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the Kubernetes API Server", + "status": "status is the most recently observed status of the Kubernetes API Server", } func (KubeAPIServer) SwaggerDoc() map[string]string { @@ -1033,8 +1161,9 @@ func (KubeAPIServer) SwaggerDoc() map[string]string { } var map_KubeAPIServerList = map[string]string{ - "": "KubeAPIServerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "items": "Items contains the items", + "": "KubeAPIServerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items contains the items", } func (KubeAPIServerList) SwaggerDoc() map[string]string { @@ -1059,9 +1188,10 @@ func (ServiceAccountIssuerStatus) SwaggerDoc() map[string]string { } var map_KubeControllerManager = map[string]string{ - "": "KubeControllerManager provides information to configure an operator to manage kube-controller-manager.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec is the specification of the desired behavior of the Kubernetes Controller Manager", - "status": "status is the most recently observed status of the Kubernetes Controller Manager", + "": "KubeControllerManager provides information to configure an operator to manage kube-controller-manager.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the Kubernetes Controller Manager", + "status": "status is the most recently observed status of the Kubernetes Controller Manager", } func (KubeControllerManager) SwaggerDoc() map[string]string { @@ -1069,8 +1199,9 @@ func (KubeControllerManager) SwaggerDoc() map[string]string { } var map_KubeControllerManagerList = map[string]string{ - "": "KubeControllerManagerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "items": "Items contains the items", + "": "KubeControllerManagerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items contains the items", } func (KubeControllerManagerList) SwaggerDoc() map[string]string { @@ -1086,7 +1217,8 @@ func (KubeControllerManagerSpec) SwaggerDoc() map[string]string { } var map_KubeStorageVersionMigrator = map[string]string{ - "": "KubeStorageVersionMigrator provides information to configure an operator to manage kube-storage-version-migrator.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "KubeStorageVersionMigrator provides information to configure an operator to manage kube-storage-version-migrator.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (KubeStorageVersionMigrator) SwaggerDoc() map[string]string { @@ -1094,8 +1226,9 @@ func (KubeStorageVersionMigrator) SwaggerDoc() map[string]string { } var map_KubeStorageVersionMigratorList = map[string]string{ - "": "KubeStorageVersionMigratorList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "items": "Items contains the items", + "": "KubeStorageVersionMigratorList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items contains the items", } func (KubeStorageVersionMigratorList) SwaggerDoc() map[string]string { @@ -1167,6 +1300,7 @@ func (FeaturesMigration) SwaggerDoc() map[string]string { var map_GatewayConfig = map[string]string{ "": "GatewayConfig holds node gateway-related parsed config file parameters and command-line overrides", "routingViaHost": "RoutingViaHost allows pod egress traffic to exit via the ovn-k8s-mp0 management port into the host before sending it out. If this is not set, traffic will always egress directly from OVN to outside without touching the host stack. Setting this to true means hardware offload will not be supported. 
Default is false if GatewayConfig is specified.", + "ipForwarding": "IPForwarding controls IP forwarding for all traffic on OVN-Kubernetes managed interfaces (such as br-ex). By default this is set to Restricted, and Kubernetes related traffic is still forwarded appropriately, but other IP traffic will not be routed by the OCP node. If there is a desire to allow the host to forward traffic across OVN-Kubernetes managed interfaces, then set this field to \"Global\". The supported values are \"Restricted\" and \"Global\".", } func (GatewayConfig) SwaggerDoc() map[string]string { @@ -1209,7 +1343,7 @@ var map_KuryrConfig = map[string]string{ "poolMaxPorts": "poolMaxPorts sets a maximum number of free ports that are being kept in a port pool. If the number of ports exceeds this setting, free ports will get deleted. Setting 0 will disable this upper bound, effectively preventing pools from shrinking and this is the default value. For more information about port pools see enablePortPoolsPrepopulation setting.", "poolMinPorts": "poolMinPorts sets a minimum number of free ports that should be kept in a port pool. If the number of ports is lower than this setting, new ports will get created and added to pool. The default is 1. For more information about port pools see enablePortPoolsPrepopulation setting.", "poolBatchPorts": "poolBatchPorts sets a number of ports that should be created in a single batch request to extend the port pool. The default is 3. For more information about port pools see enablePortPoolsPrepopulation setting.", - "mtu": "mtu is the MTU that Kuryr should use when creating pod networks in Neutron. The value has to be lower or equal to the MTU of the nodes network and Neutron has to allow creation of tenant networks with such MTU. If unset Pod networks will be created with the same MTU as the nodes network has.", + "mtu": "mtu is the MTU that Kuryr should use when creating pod networks in Neutron. The value has to be lower or equal to the MTU of the nodes network and Neutron has to allow creation of tenant networks with such MTU. If unset Pod networks will be created with the same MTU as the nodes network has. This also affects the services network created by cluster-network-operator.", } func (KuryrConfig) SwaggerDoc() map[string]string { @@ -1245,7 +1379,8 @@ func (NetFlowConfig) SwaggerDoc() map[string]string { } var map_Network = map[string]string{ - "": "Network describes the cluster's desired network configuration. It is consumed by the cluster-network-operator.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "Network describes the cluster's desired network configuration. It is consumed by the cluster-network-operator.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (Network) SwaggerDoc() map[string]string { @@ -1253,7 +1388,8 @@ func (Network) SwaggerDoc() map[string]string { } var map_NetworkList = map[string]string{ - "": "NetworkList contains a list of Network configurations\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "NetworkList contains a list of Network configurations\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (NetworkList) SwaggerDoc() map[string]string { @@ -1331,6 +1467,7 @@ func (OpenShiftSDNConfig) SwaggerDoc() map[string]string { var map_PolicyAuditConfig = map[string]string{ "rateLimit": "rateLimit is the approximate maximum number of messages to generate per-second per-node. If unset the default of 20 msg/sec is used.", "maxFileSize": "maxFilesSize is the max size an ACL_audit log file is allowed to reach before rotation occurs Units are in MB and the Default is 50MB", + "maxLogFiles": "maxLogFiles specifies the maximum number of ACL_audit log files that can be present.", "destination": "destination is the location for policy log messages. Regardless of this config, persistent logs will always be dumped to the host at /var/log/ovn/ however Additionally syslog output may be configured as follows. Valid values are: - \"libc\" -> to use the libc syslog() function of the host node's journdald process - \"udp:host:port\" -> for sending syslog over UDP - \"unix:file\" -> for using the UNIX domain socket directly - \"null\" -> to discard all messages logged to syslog The default is \"null\"", "syslogFacility": "syslogFacility the RFC5424 facility for generated messages, e.g. \"kern\". Default is \"local0\"", } @@ -1413,9 +1550,10 @@ func (StaticIPAMRoutes) SwaggerDoc() map[string]string { } var map_OpenShiftAPIServer = map[string]string{ - "": "OpenShiftAPIServer provides information to configure an operator to manage openshift-apiserver.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec is the specification of the desired behavior of the OpenShift API Server.", - "status": "status defines the observed status of the OpenShift API Server.", + "": "OpenShiftAPIServer provides information to configure an operator to manage openshift-apiserver.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the OpenShift API Server.", + "status": "status defines the observed status of the OpenShift API Server.", } func (OpenShiftAPIServer) SwaggerDoc() map[string]string { @@ -1423,8 +1561,9 @@ func (OpenShiftAPIServer) SwaggerDoc() map[string]string { } var map_OpenShiftAPIServerList = map[string]string{ - "": "OpenShiftAPIServerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "items": "Items contains the items", + "": "OpenShiftAPIServerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items contains the items", } func (OpenShiftAPIServerList) SwaggerDoc() map[string]string { @@ -1440,7 +1579,8 @@ func (OpenShiftAPIServerStatus) SwaggerDoc() map[string]string { } var map_OpenShiftControllerManager = map[string]string{ - "": "OpenShiftControllerManager provides information to configure an operator to manage openshift-controller-manager.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "OpenShiftControllerManager provides information to configure an operator to manage openshift-controller-manager.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (OpenShiftControllerManager) SwaggerDoc() map[string]string { @@ -1448,8 +1588,9 @@ func (OpenShiftControllerManager) SwaggerDoc() map[string]string { } var map_OpenShiftControllerManagerList = map[string]string{ - "": "OpenShiftControllerManagerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "items": "Items contains the items", + "": "OpenShiftControllerManagerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items contains the items", } func (OpenShiftControllerManagerList) SwaggerDoc() map[string]string { @@ -1457,9 +1598,10 @@ func (OpenShiftControllerManagerList) SwaggerDoc() map[string]string { } var map_KubeScheduler = map[string]string{ - "": "KubeScheduler provides information to configure an operator to manage scheduler.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec is the specification of the desired behavior of the Kubernetes Scheduler", - "status": "status is the most recently observed status of the Kubernetes Scheduler", + "": "KubeScheduler provides information to configure an operator to manage scheduler.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification of the desired behavior of the Kubernetes Scheduler", + "status": "status is the most recently observed status of the Kubernetes Scheduler", } func (KubeScheduler) SwaggerDoc() map[string]string { @@ -1467,8 +1609,9 @@ func (KubeScheduler) SwaggerDoc() map[string]string { } var map_KubeSchedulerList = map[string]string{ - "": "KubeSchedulerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "items": "Items contains the items", + "": "KubeSchedulerList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items contains the items", } func (KubeSchedulerList) SwaggerDoc() map[string]string { @@ -1476,9 +1619,10 @@ func (KubeSchedulerList) SwaggerDoc() map[string]string { } var map_ServiceCA = map[string]string{ - "": "ServiceCA provides information to configure an operator to manage the service cert controllers\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. They may not be overridden.", + "": "ServiceCA provides information to configure an operator to manage the service cert controllers\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. 
They may not be overridden.", } func (ServiceCA) SwaggerDoc() map[string]string { @@ -1486,8 +1630,9 @@ func (ServiceCA) SwaggerDoc() map[string]string { } var map_ServiceCAList = map[string]string{ - "": "ServiceCAList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "items": "Items contains the items", + "": "ServiceCAList is a collection of items\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items contains the items", } func (ServiceCAList) SwaggerDoc() map[string]string { @@ -1495,7 +1640,8 @@ func (ServiceCAList) SwaggerDoc() map[string]string { } var map_ServiceCatalogAPIServer = map[string]string{ - "": "ServiceCatalogAPIServer provides information to configure an operator to manage Service Catalog API Server DEPRECATED: will be removed in 4.6\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "ServiceCatalogAPIServer provides information to configure an operator to manage Service Catalog API Server DEPRECATED: will be removed in 4.6\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (ServiceCatalogAPIServer) SwaggerDoc() map[string]string { @@ -1503,8 +1649,9 @@ func (ServiceCatalogAPIServer) SwaggerDoc() map[string]string { } var map_ServiceCatalogAPIServerList = map[string]string{ - "": "ServiceCatalogAPIServerList is a collection of items DEPRECATED: will be removed in 4.6\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "items": "Items contains the items", + "": "ServiceCatalogAPIServerList is a collection of items DEPRECATED: will be removed in 4.6\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items contains the items", } func (ServiceCatalogAPIServerList) SwaggerDoc() map[string]string { @@ -1512,7 +1659,8 @@ func (ServiceCatalogAPIServerList) SwaggerDoc() map[string]string { } var map_ServiceCatalogControllerManager = map[string]string{ - "": "ServiceCatalogControllerManager provides information to configure an operator to manage Service Catalog Controller Manager DEPRECATED: will be removed in 4.6\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "ServiceCatalogControllerManager provides information to configure an operator to manage Service Catalog Controller Manager DEPRECATED: will be removed in 4.6\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (ServiceCatalogControllerManager) SwaggerDoc() map[string]string { @@ -1520,8 +1668,9 @@ func (ServiceCatalogControllerManager) SwaggerDoc() map[string]string { } var map_ServiceCatalogControllerManagerList = map[string]string{ - "": "ServiceCatalogControllerManagerList is a collection of items DEPRECATED: will be removed in 4.6\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "items": "Items contains the items", + "": "ServiceCatalogControllerManagerList is a collection of items DEPRECATED: will be removed in 4.6\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items contains the items", } func (ServiceCatalogControllerManagerList) SwaggerDoc() map[string]string { @@ -1529,9 +1678,10 @@ func (ServiceCatalogControllerManagerList) SwaggerDoc() map[string]string { } var map_Storage = map[string]string{ - "": "Storage provides a means to configure an operator to manage the cluster storage operator. `cluster` is the canonical name.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", - "spec": "spec holds user settable values for configuration", - "status": "status holds observed values from the cluster. They may not be overridden.", + "": "Storage provides a means to configure an operator to manage the cluster storage operator. `cluster` is the canonical name.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", } func (Storage) SwaggerDoc() map[string]string { @@ -1539,7 +1689,8 @@ func (Storage) SwaggerDoc() map[string]string { } var map_StorageList = map[string]string{ - "": "StorageList contains a list of Storages.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "": "StorageList contains a list of Storages.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (StorageList) SwaggerDoc() map[string]string { @@ -1547,7 +1698,8 @@ func (StorageList) SwaggerDoc() map[string]string { } var map_StorageSpec = map[string]string{ - "": "StorageSpec is the specification of the desired behavior of the cluster storage operator.", + "": "StorageSpec is the specification of the desired behavior of the cluster storage operator.", + "vsphereStorageDriver": "VSphereStorageDriver indicates the storage driver to use on VSphere clusters. Once this field is set to CSIWithMigrationDriver, it can not be changed. 
If this is empty, the platform will choose a good default, which may change over time without notice. The current default is CSIWithMigrationDriver and may not be changed. DEPRECATED: This field will be removed in a future release.", } func (StorageSpec) SwaggerDoc() map[string]string { diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/0000_10_01_etcdbackup-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1alpha1/0000_10_01_etcdbackup-TechPreviewNoUpgrade.crd.yaml new file mode 100644 index 000000000..a36cd9504 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1alpha1/0000_10_01_etcdbackup-TechPreviewNoUpgrade.crd.yaml @@ -0,0 +1,114 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1482 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: TechPreviewNoUpgrade + name: etcdbackups.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: EtcdBackup + listKind: EtcdBackupList + plural: etcdbackups + singular: etcdbackup + scope: Cluster + versions: + - name: v1alpha1 + served: true + storage: true + subresources: + status: {} + "schema": + "openAPIV3Schema": + description: "# EtcdBackup provides configuration options and status for a one-time backup attempt of the etcd cluster \n Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + pvcName: + description: PVCName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the etcd backup file would be saved The PVC itself must always be created in the "openshift-etcd" namespace If the PVC is left unspecified "" then the platform will choose a reasonable default location to save the backup. In the future this would be backups saved across the control-plane master nodes. + type: string + x-kubernetes-validations: + - rule: self == oldSelf + message: pvcName is immutable once set + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + backupJob: + description: backupJob is the reference to the Job that executes the backup. Optional + type: object + required: + - name + - namespace + properties: + name: + description: name is the name of the Job. 
Required + type: string + namespace: + description: namespace is the namespace of the Job. this is always expected to be "openshift-etcd" since the user provided PVC is also required to be in "openshift-etcd" Required + type: string + pattern: ^openshift-etcd$ + conditions: + description: conditions provide details on the status of the etcd backup job. + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/0000_10_config-operator_01_olm-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1alpha1/0000_10_config-operator_01_olm-CustomNoUpgrade.crd.yaml new file mode 100644 index 000000000..4068a064f --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1alpha1/0000_10_config-operator_01_olm-CustomNoUpgrade.crd.yaml @@ -0,0 +1,140 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1504 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: CustomNoUpgrade + name: olms.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: OLM + listKind: OLMList + plural: olms + singular: olm + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: "OLM provides information to configure an operator to manage the OLM controllers \n Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support." + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + logLevel: + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the operator itself. 
It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + description: status holds observed values from the cluster. They may not be overridden. + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. + type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + x-kubernetes-validations: + - rule: self.metadata.name == 'cluster' + message: olm is a singleton, .metadata.name must be 'cluster' + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/0000_10_config-operator_01_olm-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/operator/v1alpha1/0000_10_config-operator_01_olm-TechPreviewNoUpgrade.crd.yaml new file mode 100644 index 000000000..0e08b5113 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1alpha1/0000_10_config-operator_01_olm-TechPreviewNoUpgrade.crd.yaml @@ -0,0 +1,140 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1504 + 
include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + release.openshift.io/feature-set: TechPreviewNoUpgrade + name: olms.operator.openshift.io +spec: + group: operator.openshift.io + names: + kind: OLM + listKind: OLMList + plural: olms + singular: olm + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: "OLM provides information to configure an operator to manage the OLM controllers \n Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support." + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec holds user settable values for configuration + type: object + properties: + logLevel: + description: "logLevel is an intent based logging for an overall component. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for their operands. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + managementState: + description: managementState indicates whether and how the operator should manage the component + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + observedConfig: + description: observedConfig holds a sparse config that controller has observed from the cluster state. It exists in spec because it is an input to the level for the operator + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + operatorLogLevel: + description: "operatorLogLevel is an intent based logging for the operator itself. It does not give fine grained control, but it is a simple way to manage coarse grained logging choices that operators have to interpret for themselves. \n Valid values are: \"Normal\", \"Debug\", \"Trace\", \"TraceAll\". Defaults to \"Normal\"." + type: string + default: Normal + enum: + - "" + - Normal + - Debug + - Trace + - TraceAll + unsupportedConfigOverrides: + description: unsupportedConfigOverrides overrides the final configuration that was computed by the operator. Red Hat does not support the use of this field. Misuse of this field could lead to unexpected behavior or conflict with other configuration options. Seek guidance from the Red Hat support before using this field. Use of this property blocks cluster upgrades, it must be removed before upgrading your cluster. + type: object + nullable: true + x-kubernetes-preserve-unknown-fields: true + status: + description: status holds observed values from the cluster. 
They may not be overridden. + type: object + properties: + conditions: + description: conditions is a list of conditions and their status + type: array + items: + description: OperatorCondition is just the standard condition fields. + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string + generations: + description: generations are used to determine when an item needs to be reconciled or has changed in a way that needs a reaction. + type: array + items: + description: GenerationStatus keeps track of the generation for a given resource so that decisions about forced updates can be made. + type: object + properties: + group: + description: group is the group of the thing you're tracking + type: string + hash: + description: hash is an optional field set for resources without generation that are content sensitive like secrets and configmaps + type: string + lastGeneration: + description: lastGeneration is the last generation of the workload controller involved + type: integer + format: int64 + name: + description: name is the name of the thing you're tracking + type: string + namespace: + description: namespace is where the thing you're tracking is + type: string + resource: + description: resource is the resource type of the thing you're tracking + type: string + observedGeneration: + description: observedGeneration is the last generation change you've dealt with + type: integer + format: int64 + readyReplicas: + description: readyReplicas indicates how many replicas are ready and at the desired state + type: integer + format: int32 + version: + description: version is the level this availability applies to + type: string + x-kubernetes-validations: + - rule: self.metadata.name == 'cluster' + message: olm is a singleton, .metadata.name must be 'cluster' + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/custom.olm.testsuite.yaml b/vendor/github.com/openshift/api/operator/v1alpha1/custom.olm.testsuite.yaml new file mode 100644 index 000000000..233e73d18 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1alpha1/custom.olm.testsuite.yaml @@ -0,0 +1,28 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Custom] OLM" +crd: 0000_10_config-operator_01_olm-CustomNoUpgrade.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal OLM + initial: | + apiVersion: operator.openshift.io/v1alpha1 + kind: OLM + metadata: + name: cluster + spec: {} # No spec is required for an OLM + expected: | + apiVersion: operator.openshift.io/v1alpha1 + kind: OLM + metadata: + name: cluster + spec: + logLevel: Normal + operatorLogLevel: Normal + - name: Should reject an OLM with an invalid name + initial: | + apiVersion: operator.openshift.io/v1alpha1 + kind: OLM + metadata: + name: foo + spec: {} # No spec is required for an OLM + expectedError: "Invalid value: \"object\": olm is a singleton, .metadata.name must be 'cluster'" diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/register.go b/vendor/github.com/openshift/api/operator/v1alpha1/register.go index 3c731f618..0921431c0 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/register.go +++ b/vendor/github.com/openshift/api/operator/v1alpha1/register.go @@ -35,6 +35,10 @@ func addKnownTypes(scheme *runtime.Scheme) error { &GenericOperatorConfig{}, 
&ImageContentSourcePolicy{}, &ImageContentSourcePolicyList{}, + &OLM{}, + &OLMList{}, + &EtcdBackup{}, + &EtcdBackupList{}, ) return nil diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/techpreview.etcdbackup.testsuite.yaml b/vendor/github.com/openshift/api/operator/v1alpha1/techpreview.etcdbackup.testsuite.yaml new file mode 100644 index 000000000..10d855189 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1alpha1/techpreview.etcdbackup.testsuite.yaml @@ -0,0 +1,38 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[TechPreview] EtcdBackup" +crd: 0000_10_01_etcdbackup-TechPreviewNoUpgrade.crd.yaml +tests: + onCreate: + - name: Should be able to create an EtcdBackup with a valid spec + initial: | + apiVersion: operator.openshift.io/v1alpha1 + kind: EtcdBackup + spec: + pvcName: etcdbackup-pvc + expected: | + apiVersion: operator.openshift.io/v1alpha1 + kind: EtcdBackup + spec: + pvcName: etcdbackup-pvc + - name: Should be able to create an EtcdBackup without the pvcName specified + initial: | + apiVersion: operator.openshift.io/v1alpha1 + kind: EtcdBackup + spec: {} + expected: | + apiVersion: operator.openshift.io/v1alpha1 + kind: EtcdBackup + spec: {} + onUpdate: + - name: pvcName is immutable once set + initial: | + apiVersion: operator.openshift.io/v1alpha1 + kind: EtcdBackup + spec: + pvcName: etcdbackup-pvc + updated: | + apiVersion: operator.openshift.io/v1alpha1 + kind: EtcdBackup + spec: + pvcName: updated-etcdbackup-pvc + expectedError: "spec.pvcName: Invalid value: \"string\": pvcName is immutable once set" diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/techpreview.olm.testsuite.yaml b/vendor/github.com/openshift/api/operator/v1alpha1/techpreview.olm.testsuite.yaml new file mode 100644 index 000000000..99c85fe01 --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1alpha1/techpreview.olm.testsuite.yaml @@ -0,0 +1,28 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Tech Preview] OLM" +crd: 0000_10_config-operator_01_olm-TechPreviewNoUpgrade.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal OLM + initial: | + apiVersion: operator.openshift.io/v1alpha1 + kind: OLM + metadata: + name: cluster + spec: {} # No spec is required for an OLM + expected: | + apiVersion: operator.openshift.io/v1alpha1 + kind: OLM + metadata: + name: cluster + spec: + logLevel: Normal + operatorLogLevel: Normal + - name: Should reject an OLM with an invalid name + initial: | + apiVersion: operator.openshift.io/v1alpha1 + kind: OLM + metadata: + name: foo + spec: {} # No spec is required for an OLM + expectedError: "Invalid value: \"object\": olm is a singleton, .metadata.name must be 'cluster'" diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/types.go b/vendor/github.com/openshift/api/operator/v1alpha1/types.go index 69eb004c1..4d5a207e6 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/types.go +++ b/vendor/github.com/openshift/api/operator/v1alpha1/types.go @@ -6,19 +6,24 @@ import ( configv1 "github.com/openshift/api/config/v1" ) +// DEPRECATED: Use v1.ManagementState instead type ManagementState string const ( // Managed means that the operator is actively managing its resources and trying to keep the component active + // DEPRECATED: Use v1.Managed instead Managed ManagementState = "Managed" // Unmanaged means that the operator is not taking any action related to the component + // 
DEPRECATED: Use v1.Unmanaged instead Unmanaged ManagementState = "Unmanaged" // Removed means that the operator is actively managing its resources and trying to remove all traces of the component + // DEPRECATED: Use v1.Removed instead Removed ManagementState = "Removed" ) // OperatorSpec contains common fields for an operator to need. It is intended to be anonymous included // inside of the Spec struct for you particular operator. +// DEPRECATED: Use v1.OperatorSpec instead type OperatorSpec struct { // managementState indicates whether and how the operator should manage the component ManagementState ManagementState `json:"managementState"` @@ -38,6 +43,7 @@ type OperatorSpec struct { } // LoggingConfig holds information about configuring logging +// DEPRECATED: Use v1.LogLevel instead type LoggingConfig struct { // level is passed to glog. Level int64 `json:"level"` @@ -46,24 +52,34 @@ type LoggingConfig struct { Vmodule string `json:"vmodule"` } +// DEPRECATED: Use v1.ConditionStatus instead type ConditionStatus string const ( - ConditionTrue ConditionStatus = "True" - ConditionFalse ConditionStatus = "False" + // DEPRECATED: Use v1.ConditionTrue instead + ConditionTrue ConditionStatus = "True" + // DEPRECATED: Use v1.ConditionFalse instead + ConditionFalse ConditionStatus = "False" + // DEPRECATED: Use v1.ConditionUnknown instead ConditionUnknown ConditionStatus = "Unknown" // these conditions match the conditions for the ClusterOperator type. - OperatorStatusTypeAvailable = "Available" + // DEPRECATED: Use v1.OperatorStatusTypeAvailable instead + OperatorStatusTypeAvailable = "Available" + // DEPRECATED: Use v1.OperatorStatusTypeProgressing instead OperatorStatusTypeProgressing = "Progressing" - OperatorStatusTypeFailing = "Failing" + // DEPRECATED: Use v1.OperatorStatusTypeDegraded instead + OperatorStatusTypeFailing = "Failing" + // DEPRECATED: Use v1.OperatorStatusTypeProgressing instead OperatorStatusTypeMigrating = "Migrating" // TODO this is going to be removed + // DEPRECATED: Use v1.OperatorStatusTypeAvailable instead OperatorStatusTypeSyncSuccessful = "SyncSuccessful" ) // OperatorCondition is just the standard condition fields. +// DEPRECATED: Use v1.OperatorCondition instead type OperatorCondition struct { Type string `json:"type"` Status ConditionStatus `json:"status"` @@ -73,6 +89,7 @@ type OperatorCondition struct { } // VersionAvailability gives information about the synchronization and operational status of a particular version of the component +// DEPRECATED: Use fields in v1.OperatorStatus instead type VersionAvailability struct { // version is the level this availability applies to Version string `json:"version"` @@ -87,6 +104,7 @@ type VersionAvailability struct { } // GenerationHistory keeps track of the generation for a given resource so that decisions about forced updated can be made. +// DEPRECATED: Use fields in v1.GenerationStatus instead type GenerationHistory struct { // group is the group of the thing you're tracking Group string `json:"group"` @@ -102,6 +120,7 @@ type GenerationHistory struct { // OperatorStatus contains common fields for an operator to need. It is intended to be anonymous included // inside of the Status struct for you particular operator. 
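The hunks above deprecate essentially the whole operator/v1alpha1 surface (ManagementState, OperatorSpec, LoggingConfig, ConditionStatus, the OperatorStatusType* constants, OperatorCondition, VersionAvailability, GenerationHistory, OperatorStatus) in favour of operator/v1. A minimal consumer-side sketch of the replacement, using only the v1 names that the deprecation comments themselves point at; the package main scaffolding and the Reason/Message strings are illustrative, not part of the vendored API:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	operatorv1 "github.com/openshift/api/operator/v1"
)

func main() {
	// operatorv1.OperatorCondition replaces the deprecated
	// v1alpha1.OperatorCondition, with the condition type and status
	// constants also coming from operator/v1.
	cond := operatorv1.OperatorCondition{
		Type:               operatorv1.OperatorStatusTypeAvailable,
		Status:             operatorv1.ConditionTrue,
		LastTransitionTime: metav1.Now(),
		Reason:             "AsExpected",
		Message:            "the operand is available",
	}

	// operatorv1.Managed replaces the deprecated v1alpha1.Managed.
	state := operatorv1.Managed

	fmt.Println(cond.Type, cond.Status, state)
}

Keeping the v1alpha1 names around (rather than deleting them) preserves source compatibility for existing consumers while steering new code to operator/v1.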
+// DEPRECATED: Use v1.OperatorStatus instead type OperatorStatus struct { // observedGeneration is the last generation change you've dealt with ObservedGeneration int64 `json:"observedGeneration,omitempty"` @@ -156,6 +175,7 @@ type DelegatedAuthorization struct { // StaticPodOperatorStatus is status for controllers that manage static pods. There are different needs because individual // node status must be tracked. +// DEPRECATED: Use v1.StaticPodOperatorStatus instead type StaticPodOperatorStatus struct { OperatorStatus `json:",inline"` @@ -167,6 +187,7 @@ type StaticPodOperatorStatus struct { } // NodeStatus provides information about the current state of a particular node managed by this operator. +// Deprecated: Use v1.NodeStatus instead type NodeStatus struct { // nodeName is the name of the node NodeName string `json:"nodeName"` diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/types_etcdbackup.go b/vendor/github.com/openshift/api/operator/v1alpha1/types_etcdbackup.go new file mode 100644 index 000000000..b1d73306c --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1alpha1/types_etcdbackup.go @@ -0,0 +1,101 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +kubebuilder:resource:scope=Cluster +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +// # EtcdBackup provides configuration options and status for a one-time backup attempt of the etcd cluster +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +type EtcdBackup struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec holds user settable values for configuration + // +kubebuilder:validation:Required + // +required + Spec EtcdBackupSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +kubebuilder:validation:Optional + // +optional + Status EtcdBackupStatus `json:"status"` +} + +type EtcdBackupSpec struct { + // PVCName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the + // etcd backup file would be saved + // The PVC itself must always be created in the "openshift-etcd" namespace + // If the PVC is left unspecified "" then the platform will choose a reasonable default location to save the backup. + // In the future this would be backups saved across the control-plane master nodes. + // +kubebuilder:validation:Optional + // +optional + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="pvcName is immutable once set" + PVCName string `json:"pvcName"` +} + +// +kubebuilder:validation:Optional +type EtcdBackupStatus struct { + // conditions provide details on the status of the etcd backup job. + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + // +optional + Conditions []metav1.Condition `json:"conditions" patchStrategy:"merge" patchMergeKey:"type"` + + // backupJob is the reference to the Job that executes the backup. + // Optional + // +kubebuilder:validation:Optional + BackupJob *BackupJobReference `json:"backupJob"` +} + +// BackupJobReference holds a reference to the batch/v1 Job created to run the etcd backup +type BackupJobReference struct { + + // namespace is the namespace of the Job. 
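Worth pausing on the new EtcdBackup type above: spec.pvcName carries a kubebuilder XValidation marker (rule `self == oldSelf`), so immutability is enforced by the API server through CEL at admission time rather than by webhook code, exactly what the techpreview.etcdbackup.testsuite.yaml onUpdate case exercises. A minimal sketch of requesting a one-time backup with this type, assuming the types as vendored here; the object name and PVC name are illustrative only:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/openshift/api/operator/v1alpha1"
)

func main() {
	backup := v1alpha1.EtcdBackup{
		TypeMeta: metav1.TypeMeta{
			APIVersion: "operator.openshift.io/v1alpha1",
			Kind:       "EtcdBackup",
		},
		// The object name is illustrative.
		ObjectMeta: metav1.ObjectMeta{Name: "backup-example"},
		Spec: v1alpha1.EtcdBackupSpec{
			// The PVC must live in "openshift-etcd"; once set, the CEL
			// rule `self == oldSelf` makes this field immutable.
			PVCName: "etcdbackup-pvc",
		},
	}
	fmt.Printf("%s/%s -> pvc %q\n", backup.APIVersion, backup.Kind, backup.Spec.PVCName)
}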
+ // this is always expected to be "openshift-etcd" since the user provided PVC + // is also required to be in "openshift-etcd" + // Required + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern:=`^openshift-etcd$` + Namespace string `json:"namespace"` + + // name is the name of the Job. + // Required + // +kubebuilder:validation:Required + Name string `json:"name"` +} + +type BackupConditionReason string + +var ( + // BackupPending is added to the EtcdBackupStatus Conditions when the etcd backup is pending. + BackupPending BackupConditionReason = "BackupPending" + + // BackupCompleted is added to the EtcdBackupStatus Conditions when the etcd backup has completed. + BackupCompleted BackupConditionReason = "BackupCompleted" + + // BackupFailed is added to the EtcdBackupStatus Conditions when the etcd backup has failed. + BackupFailed BackupConditionReason = "BackupFailed" + + // BackupSkipped is added to the EtcdBackupStatus Conditions when the etcd backup has been skipped. + BackupSkipped BackupConditionReason = "BackupSkipped" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EtcdBackupList is a collection of items +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +type EtcdBackupList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []EtcdBackup `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/types_image_content_source_policy.go b/vendor/github.com/openshift/api/operator/v1alpha1/types_image_content_source_policy.go index 75b5dd7fc..1a101cad6 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/types_image_content_source_policy.go +++ b/vendor/github.com/openshift/api/operator/v1alpha1/types_image_content_source_policy.go @@ -12,7 +12,10 @@ import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. // +openshift:compatibility-gen:level=4 type ImageContentSourcePolicy struct { - metav1.TypeMeta `json:",inline"` + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata,omitempty"` // spec holds user settable values for configuration @@ -51,6 +54,9 @@ type ImageContentSourcePolicySpec struct { // +openshift:compatibility-gen:level=4 type ImageContentSourcePolicyList struct { metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata metav1.ListMeta `json:"metadata"` Items []ImageContentSourcePolicy `json:"items"` diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/types_olm.go b/vendor/github.com/openshift/api/operator/v1alpha1/types_olm.go new file mode 100644 index 000000000..8f20690ae --- /dev/null +++ b/vendor/github.com/openshift/api/operator/v1alpha1/types_olm.go @@ -0,0 +1,56 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + operatorv1 "github.com/openshift/api/operator/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OLM provides information to configure an operator to manage the OLM controllers +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'cluster'",message="olm is a singleton, .metadata.name must be 'cluster'" +type OLM struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + //spec holds user settable values for configuration + // +kubebuilder:validation:Required + Spec OLMSpec `json:"spec"` + // status holds observed values from the cluster. They may not be overridden. + // +optional + Status OLMStatus `json:"status"` +} + +type OLMSpec struct { + operatorv1.OperatorSpec `json:",inline"` +} + +type OLMStatus struct { + operatorv1.OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OLMList is a collection of items +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +type OLMList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + // Items contains the items + Items []OLM `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go index 4013889c6..08ef2811a 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.deepcopy.go @@ -6,9 +6,26 @@ package v1alpha1 import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BackupJobReference) DeepCopyInto(out *BackupJobReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BackupJobReference. 
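The new OLM type in types_olm.go pairs a cluster-scoped CRD with a type-level CEL rule (`self.metadata.name == 'cluster'`), which is what the testsuites' singleton cases verify. Since OLMSpec just embeds operatorv1.OperatorSpec, populating it is one literal deep. A rough sketch of constructing the singleton, assuming the Managed/Normal constants from operator/v1 that the embedded spec and the CRD enum (`Normal`, `Debug`, `Trace`, `TraceAll`) imply:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	operatorv1 "github.com/openshift/api/operator/v1"
	"github.com/openshift/api/operator/v1alpha1"
)

func main() {
	olm := v1alpha1.OLM{
		TypeMeta: metav1.TypeMeta{APIVersion: "operator.openshift.io/v1alpha1", Kind: "OLM"},
		// Any name other than "cluster" is rejected by the CRD's CEL rule.
		ObjectMeta: metav1.ObjectMeta{Name: "cluster"},
		Spec: v1alpha1.OLMSpec{
			OperatorSpec: operatorv1.OperatorSpec{
				ManagementState: operatorv1.Managed,
				LogLevel:        operatorv1.Normal,
			},
		},
	}
	fmt.Println(olm.Name, olm.Spec.ManagementState, olm.Spec.LogLevel)
}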
+func (in *BackupJobReference) DeepCopy() *BackupJobReference { + if in == nil { + return nil + } + out := new(BackupJobReference) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DelegatedAuthentication) DeepCopyInto(out *DelegatedAuthentication) { *out = *in @@ -41,6 +58,111 @@ func (in *DelegatedAuthorization) DeepCopy() *DelegatedAuthorization { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EtcdBackup) DeepCopyInto(out *EtcdBackup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdBackup. +func (in *EtcdBackup) DeepCopy() *EtcdBackup { + if in == nil { + return nil + } + out := new(EtcdBackup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EtcdBackup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EtcdBackupList) DeepCopyInto(out *EtcdBackupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]EtcdBackup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdBackupList. +func (in *EtcdBackupList) DeepCopy() *EtcdBackupList { + if in == nil { + return nil + } + out := new(EtcdBackupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EtcdBackupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EtcdBackupSpec) DeepCopyInto(out *EtcdBackupSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdBackupSpec. +func (in *EtcdBackupSpec) DeepCopy() *EtcdBackupSpec { + if in == nil { + return nil + } + out := new(EtcdBackupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EtcdBackupStatus) DeepCopyInto(out *EtcdBackupStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.BackupJob != nil { + in, out := &in.BackupJob, &out.BackupJob + *out = new(BackupJobReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EtcdBackupStatus. 
+func (in *EtcdBackupStatus) DeepCopy() *EtcdBackupStatus { + if in == nil { + return nil + } + out := new(EtcdBackupStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GenerationHistory) DeepCopyInto(out *GenerationHistory) { *out = *in @@ -206,6 +328,101 @@ func (in *NodeStatus) DeepCopy() *NodeStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OLM) DeepCopyInto(out *OLM) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLM. +func (in *OLM) DeepCopy() *OLM { + if in == nil { + return nil + } + out := new(OLM) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OLM) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OLMList) DeepCopyInto(out *OLMList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OLM, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLMList. +func (in *OLMList) DeepCopy() *OLMList { + if in == nil { + return nil + } + out := new(OLMList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OLMList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OLMSpec) DeepCopyInto(out *OLMSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLMSpec. +func (in *OLMSpec) DeepCopy() *OLMSpec { + if in == nil { + return nil + } + out := new(OLMSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OLMStatus) DeepCopyInto(out *OLMStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OLMStatus. +func (in *OLMStatus) DeepCopy() *OLMStatus { + if in == nil { + return nil + } + out := new(OLMStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
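The generated deepcopy functions above are what make the new types usable as runtime.Object: DeepCopyObject is the interface method client-go and controller-runtime require, and DeepCopy clones pointer-valued fields such as EtcdBackupStatus.BackupJob so objects read from a shared informer cache can be mutated safely. A small illustration under that assumption (the job names are made up):

package main

import (
	"fmt"

	"github.com/openshift/api/operator/v1alpha1"
)

func main() {
	orig := &v1alpha1.EtcdBackupStatus{
		BackupJob: &v1alpha1.BackupJobReference{Namespace: "openshift-etcd", Name: "backup-job"},
	}
	// DeepCopy duplicates the BackupJob pointer too, so editing the copy
	// leaves the original (e.g. a cache entry) untouched.
	cp := orig.DeepCopy()
	cp.BackupJob.Name = "retried-backup-job"
	fmt.Println(orig.BackupJob.Name, cp.BackupJob.Name) // backup-job retried-backup-job
}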
func (in *OperatorCondition) DeepCopyInto(out *OperatorCondition) { *out = *in diff --git a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go index 7897be3e5..c8cce688f 100644 --- a/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go +++ b/vendor/github.com/openshift/api/operator/v1alpha1/zz_generated.swagger_doc_generated.go @@ -30,7 +30,7 @@ func (DelegatedAuthorization) SwaggerDoc() map[string]string { } var map_GenerationHistory = map[string]string{ - "": "GenerationHistory keeps track of the generation for a given resource so that decisions about forced updated can be made.", + "": "GenerationHistory keeps track of the generation for a given resource so that decisions about forced updated can be made. DEPRECATED: Use fields in v1.GenerationStatus instead", "group": "group is the group of the thing you're tracking", "resource": "resource is the resource type of the thing you're tracking", "namespace": "namespace is where the thing you're tracking is", @@ -55,7 +55,7 @@ func (GenericOperatorConfig) SwaggerDoc() map[string]string { } var map_LoggingConfig = map[string]string{ - "": "LoggingConfig holds information about configuring logging", + "": "LoggingConfig holds information about configuring logging DEPRECATED: Use v1.LogLevel instead", "level": "level is passed to glog.", "vmodule": "vmodule is passed to glog.", } @@ -65,7 +65,7 @@ func (LoggingConfig) SwaggerDoc() map[string]string { } var map_NodeStatus = map[string]string{ - "": "NodeStatus provides information about the current state of a particular node managed by this operator.", + "": "NodeStatus provides information about the current state of a particular node managed by this operator. Deprecated: Use v1.NodeStatus instead", "nodeName": "nodeName is the name of the node", "currentDeploymentGeneration": "currentDeploymentGeneration is the generation of the most recently successful deployment", "targetDeploymentGeneration": "targetDeploymentGeneration is the generation of the deployment we're trying to apply", @@ -78,7 +78,7 @@ func (NodeStatus) SwaggerDoc() map[string]string { } var map_OperatorCondition = map[string]string{ - "": "OperatorCondition is just the standard condition fields.", + "": "OperatorCondition is just the standard condition fields. DEPRECATED: Use v1.OperatorCondition instead", } func (OperatorCondition) SwaggerDoc() map[string]string { @@ -86,7 +86,7 @@ func (OperatorCondition) SwaggerDoc() map[string]string { } var map_OperatorSpec = map[string]string{ - "": "OperatorSpec contains common fields for an operator to need. It is intended to be anonymous included inside of the Spec struct for you particular operator.", + "": "OperatorSpec contains common fields for an operator to need. It is intended to be anonymous included inside of the Spec struct for you particular operator. DEPRECATED: Use v1.OperatorSpec instead", "managementState": "managementState indicates whether and how the operator should manage the component", "imagePullSpec": "imagePullSpec is the image to use for the component.", "imagePullPolicy": "imagePullPolicy specifies the image pull policy. One of Always, Never, IfNotPresent. 
Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.", @@ -99,7 +99,7 @@ func (OperatorSpec) SwaggerDoc() map[string]string { } var map_OperatorStatus = map[string]string{ - "": "OperatorStatus contains common fields for an operator to need. It is intended to be anonymous included inside of the Status struct for you particular operator.", + "": "OperatorStatus contains common fields for an operator to need. It is intended to be anonymous included inside of the Status struct for you particular operator. DEPRECATED: Use v1.OperatorStatus instead", "observedGeneration": "observedGeneration is the last generation change you've dealt with", "conditions": "conditions is a list of conditions and their status", "state": "state indicates what the operator has observed to be its current operational status.", @@ -113,7 +113,7 @@ func (OperatorStatus) SwaggerDoc() map[string]string { } var map_StaticPodOperatorStatus = map[string]string{ - "": "StaticPodOperatorStatus is status for controllers that manage static pods. There are different needs because individual node status must be tracked.", + "": "StaticPodOperatorStatus is status for controllers that manage static pods. There are different needs because individual node status must be tracked. DEPRECATED: Use v1.StaticPodOperatorStatus instead", "latestAvailableDeploymentGeneration": "latestAvailableDeploymentGeneration is the deploymentID of the most recent deployment", "nodeStatuses": "nodeStatuses track the deployment values and errors across individual nodes", } @@ -123,7 +123,7 @@ func (StaticPodOperatorStatus) SwaggerDoc() map[string]string { } var map_VersionAvailability = map[string]string{ - "": "VersionAvailability gives information about the synchronization and operational status of a particular version of the component", + "": "VersionAvailability gives information about the synchronization and operational status of a particular version of the component DEPRECATED: Use fields in v1.OperatorStatus instead", "version": "version is the level this availability applies to", "updatedReplicas": "updatedReplicas indicates how many replicas are at the desired state", "readyReplicas": "readyReplicas indicates how many replicas are ready and at the desired state", @@ -135,9 +135,55 @@ func (VersionAvailability) SwaggerDoc() map[string]string { return map_VersionAvailability } +var map_BackupJobReference = map[string]string{ + "": "BackupJobReference holds a reference to the batch/v1 Job created to run the etcd backup", + "namespace": "namespace is the namespace of the Job. this is always expected to be \"openshift-etcd\" since the user provided PVC is also required to be in \"openshift-etcd\" Required", + "name": "name is the name of the Job. Required", +} + +func (BackupJobReference) SwaggerDoc() map[string]string { + return map_BackupJobReference +} + +var map_EtcdBackup = map[string]string{ + "": "\n\n# EtcdBackup provides configuration options and status for a one-time backup attempt of the etcd cluster\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. 
They may not be overridden.", +} + +func (EtcdBackup) SwaggerDoc() map[string]string { + return map_EtcdBackup +} + +var map_EtcdBackupList = map[string]string{ + "": "EtcdBackupList is a collection of items\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", +} + +func (EtcdBackupList) SwaggerDoc() map[string]string { + return map_EtcdBackupList +} + +var map_EtcdBackupSpec = map[string]string{ + "pvcName": "PVCName specifies the name of the PersistentVolumeClaim (PVC) which binds a PersistentVolume where the etcd backup file would be saved The PVC itself must always be created in the \"openshift-etcd\" namespace If the PVC is left unspecified \"\" then the platform will choose a reasonable default location to save the backup. In the future this would be backups saved across the control-plane master nodes.", +} + +func (EtcdBackupSpec) SwaggerDoc() map[string]string { + return map_EtcdBackupSpec +} + +var map_EtcdBackupStatus = map[string]string{ + "conditions": "conditions provide details on the status of the etcd backup job.", + "backupJob": "backupJob is the reference to the Job that executes the backup. Optional", +} + +func (EtcdBackupStatus) SwaggerDoc() map[string]string { + return map_EtcdBackupStatus +} + var map_ImageContentSourcePolicy = map[string]string{ - "": "ImageContentSourcePolicy holds cluster-wide information about how to handle registry mirror rules. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", - "spec": "spec holds user settable values for configuration", + "": "ImageContentSourcePolicy holds cluster-wide information about how to handle registry mirror rules. When multiple policies are defined, the outcome of the behavior is defined on each field.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", } func (ImageContentSourcePolicy) SwaggerDoc() map[string]string { @@ -145,7 +191,8 @@ func (ImageContentSourcePolicy) SwaggerDoc() map[string]string { } var map_ImageContentSourcePolicyList = map[string]string{ - "": "ImageContentSourcePolicyList lists the items in the ImageContentSourcePolicy CRD.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "": "ImageContentSourcePolicyList lists the items in the ImageContentSourcePolicy CRD.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", } func (ImageContentSourcePolicyList) SwaggerDoc() map[string]string { @@ -171,4 +218,25 @@ func (RepositoryDigestMirrors) SwaggerDoc() map[string]string { return map_RepositoryDigestMirrors } +var map_OLM = map[string]string{ + "": "OLM provides information to configure an operator to manage the OLM controllers\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec holds user settable values for configuration", + "status": "status holds observed values from the cluster. They may not be overridden.", +} + +func (OLM) SwaggerDoc() map[string]string { + return map_OLM +} + +var map_OLMList = map[string]string{ + "": "OLMList is a collection of items\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items contains the items", +} + +func (OLMList) SwaggerDoc() map[string]string { + return map_OLMList +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/operatorcontrolplane/.codegen.yaml b/vendor/github.com/openshift/api/operatorcontrolplane/.codegen.yaml new file mode 100644 index 000000000..ffa2c8d9b --- /dev/null +++ b/vendor/github.com/openshift/api/operatorcontrolplane/.codegen.yaml @@ -0,0 +1,2 @@ +swaggerdocs: + commentPolicy: Warn diff --git a/vendor/github.com/openshift/api/operatorcontrolplane/install.go b/vendor/github.com/openshift/api/operatorcontrolplane/install.go new file mode 100644 index 000000000..8e8abd0ab --- /dev/null +++ b/vendor/github.com/openshift/api/operatorcontrolplane/install.go @@ -0,0 +1,26 @@ +package operatorcontrolplane + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + "github.com/openshift/api/operatorcontrolplane/v1alpha1" +) + +const ( + GroupName = "controlplane.operator.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(v1alpha1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/0000_10-pod-network-connectivity-check.crd.yaml b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/0000_10-pod-network-connectivity-check.crd.yaml new file mode 100644 index 000000000..891190219 --- /dev/null +++ b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/0000_10-pod-network-connectivity-check.crd.yaml @@ -0,0 +1,227 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/639 + include.release.openshift.io/self-managed-high-availability: "true" + 
include.release.openshift.io/single-node-developer: "true" + name: podnetworkconnectivitychecks.controlplane.operator.openshift.io +spec: + group: controlplane.operator.openshift.io + names: + kind: PodNetworkConnectivityCheck + listKind: PodNetworkConnectivityCheckList + plural: podnetworkconnectivitychecks + singular: podnetworkconnectivitycheck + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: "PodNetworkConnectivityCheck \n Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the source and target of the connectivity check + type: object + required: + - sourcePod + - targetEndpoint + properties: + sourcePod: + description: SourcePod names the pod from which the condition will be checked + type: string + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + targetEndpoint: + description: EndpointAddress to check. A TCP address of the form host:port. Note that if host is a DNS name, then the check would fail if the DNS name cannot be resolved. Specify an IP address for host to bypass DNS name lookup. + type: string + pattern: ^\S+:\d*$ + tlsClientCert: + description: TLSClientCert, if specified, references a kubernetes.io/tls type secret with 'tls.crt' and 'tls.key' entries containing an optional TLS client certificate and key to be used when checking endpoints that require a client certificate in order to gracefully perform the scan without causing excessive logging in the endpoint process. The secret must exist in the same namespace as this resource. + type: object + required: + - name + properties: + name: + description: name is the metadata.name of the referenced secret + type: string + status: + description: Status contains the observed status of the connectivity check + type: object + properties: + conditions: + description: Conditions summarize the status of the check + type: array + items: + description: PodNetworkConnectivityCheckCondition represents the overall status of the pod network connectivity. + type: object + required: + - lastTransitionTime + - status + - type + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status to another. + type: string + format: date-time + nullable: true + message: + description: Message indicating details about last transition in a human readable format. + type: string + reason: + description: Reason for the condition's last status transition in a machine readable format.
+ type: string + status: + description: Status of the condition + type: string + type: + description: Type of the condition + type: string + failures: + description: Failures contains logs of unsuccessful check actions + type: array + items: + description: LogEntry records events + type: object + required: + - success + - time + properties: + latency: + description: Latency records how long the action mentioned in the entry took. + type: string + nullable: true + message: + description: Message explaining status in a human readable format. + type: string + reason: + description: Reason for status in a machine readable format. + type: string + success: + description: Success indicates if the log entry indicates a success or failure. + type: boolean + time: + description: Start time of check action. + type: string + format: date-time + nullable: true + outages: + description: Outages contains logs of time periods of outages + type: array + items: + description: OutageEntry records time period of an outage + type: object + required: + - start + properties: + end: + description: End of outage detected + type: string + format: date-time + nullable: true + endLogs: + description: EndLogs contains log entries related to the end of this outage. Should contain the success entry that resolved the outage and possibly a few of the failure log entries that preceded it. + type: array + items: + description: LogEntry records events + type: object + required: + - success + - time + properties: + latency: + description: Latency records how long the action mentioned in the entry took. + type: string + nullable: true + message: + description: Message explaining status in a human readable format. + type: string + reason: + description: Reason for status in a machine readable format. + type: string + success: + description: Success indicates if the log entry indicates a success or failure. + type: boolean + time: + description: Start time of check action. + type: string + format: date-time + nullable: true + message: + description: Message summarizes outage details in a human readable format. + type: string + start: + description: Start of outage detected + type: string + format: date-time + nullable: true + startLogs: + description: StartLogs contains log entries related to the start of this outage. Should contain the original failure, any entries where the failure mode changed. + type: array + items: + description: LogEntry records events + type: object + required: + - success + - time + properties: + latency: + description: Latency records how long the action mentioned in the entry took. + type: string + nullable: true + message: + description: Message explaining status in a human readable format. + type: string + reason: + description: Reason for status in a machine readable format. + type: string + success: + description: Success indicates if the log entry indicates a success or failure. + type: boolean + time: + description: Start time of check action. + type: string + format: date-time + nullable: true + successes: + description: Successes contains logs of successful check actions + type: array + items: + description: LogEntry records events + type: object + required: + - success + - time + properties: + latency: + description: Latency records how long the action mentioned in the entry took. + type: string + nullable: true + message: + description: Message explaining status in a human readable format. + type: string + reason: + description: Reason for status in a machine readable format.
+ type: string + success: + description: Success indicates if the log entry indicates a success or failure. + type: boolean + time: + description: Start time of check action. + type: string + format: date-time + nullable: true + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/Makefile b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/Makefile new file mode 100644 index 000000000..11371b126 --- /dev/null +++ b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="controlplane.operator.openshift.io/v1alpha1" diff --git a/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/doc.go b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/doc.go new file mode 100644 index 000000000..73f55856a --- /dev/null +++ b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +kubebuilder:validation:Optional +// +groupName=controlplane.operator.openshift.io + +package v1alpha1 diff --git a/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/register.go b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/register.go new file mode 100644 index 000000000..1ffc55381 --- /dev/null +++ b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/register.go @@ -0,0 +1,39 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "controlplane.operator.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func addKnownTypes(scheme *runtime.Scheme) error { + metav1.AddToGroupVersion(scheme, GroupVersion) + + scheme.AddKnownTypes(GroupVersion, + &PodNetworkConnectivityCheck{}, + &PodNetworkConnectivityCheckList{}, + ) + + return nil +} diff --git a/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/stable.podnetworkconnectivitycheck.testsuite.yaml b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/stable.podnetworkconnectivitycheck.testsuite.yaml new file mode 100644 index 000000000..2db10295c --- /dev/null +++ b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/stable.podnetworkconnectivitycheck.testsuite.yaml @@ -0,0 +1,18 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] PodNetworkConnectivityCheck" +crd: 0000_10-pod-network-connectivity-check.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal PodNetworkConnectivityCheck + initial: | + apiVersion: controlplane.operator.openshift.io/v1alpha1 + kind: 
PodNetworkConnectivityCheck + spec: + sourcePod: foo + targetEndpoint: foo:0 + expected: | + apiVersion: controlplane.operator.openshift.io/v1alpha1 + kind: PodNetworkConnectivityCheck + spec: + sourcePod: foo + targetEndpoint: foo:0 diff --git a/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/types_conditioncheck.go b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/types_conditioncheck.go new file mode 100644 index 000000000..daaa8a33f --- /dev/null +++ b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/types_conditioncheck.go @@ -0,0 +1,193 @@ +// Package v1alpha1 is an API version in the controlplane.operator.openshift.io group +package v1alpha1 + +import ( + v1 "github.com/openshift/api/config/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodNetworkConnectivityCheck +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +kubebuilder:subresource:status +// +openshift:compatibility-gen:level=4 +type PodNetworkConnectivityCheck struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + // Spec defines the source and target of the connectivity check + // +kubebuilder:validation:Required + // +required + Spec PodNetworkConnectivityCheckSpec `json:"spec"` + + // Status contains the observed status of the connectivity check + // +optional + Status PodNetworkConnectivityCheckStatus `json:"status,omitempty"` +} + +type PodNetworkConnectivityCheckSpec struct { + // SourcePod names the pod from which the condition will be checked + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$` + // +required + SourcePod string `json:"sourcePod"` + + // EndpointAddress to check. A TCP address of the form host:port. Note that + // if host is a DNS name, then the check would fail if the DNS name cannot + // be resolved. Specify an IP address for host to bypass DNS name lookup. + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^\S+:\d*$` + // +required + TargetEndpoint string `json:"targetEndpoint"` + + // TLSClientCert, if specified, references a kubernetes.io/tls type secret with 'tls.crt' and + // 'tls.key' entries containing an optional TLS client certificate and key to be used when + // checking endpoints that require a client certificate in order to gracefully perform the + // scan without causing excessive logging in the endpoint process. The secret must exist in + // the same namespace as this resource.
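The sourcePod and targetEndpoint patterns above are plain RE2 expressions, so their intent can be sanity-checked locally with Go's regexp package. A minimal sketch with illustrative sample values (the API server, not this code, performs the real CRD validation):

```go
package main

import (
	"fmt"
	"regexp"
)

// Patterns copied verbatim from the SourcePod and TargetEndpoint markers above.
var (
	sourcePodPattern      = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$`)
	targetEndpointPattern = regexp.MustCompile(`^\S+:\d*$`)
)

func main() {
	fmt.Println(sourcePodPattern.MatchString("network-check-source"))  // true: DNS-1123 subdomain
	fmt.Println(targetEndpointPattern.MatchString("10.0.0.1:6443"))    // true: host:port form
	fmt.Println(targetEndpointPattern.MatchString("api.example.com:")) // true: port digits are optional (\d*)
	fmt.Println(targetEndpointPattern.MatchString("no-port"))          // false: missing the colon
}
```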
+ // +optional + TLSClientCert v1.SecretNameReference `json:"tlsClientCert,omitempty"` +} + +// +k8s:deepcopy-gen=true +type PodNetworkConnectivityCheckStatus struct { + // Successes contains logs of successful check actions + // +optional + Successes []LogEntry `json:"successes,omitempty"` + + // Failures contains logs of unsuccessful check actions + // +optional + Failures []LogEntry `json:"failures,omitempty"` + + // Outages contains logs of time periods of outages + // +optional + Outages []OutageEntry `json:"outages,omitempty"` + + // Conditions summarize the status of the check + // +patchMergeKey=type + // +patchStrategy=merge + // +optional + Conditions []PodNetworkConnectivityCheckCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` +} + +// LogEntry records events +type LogEntry struct { + // Start time of check action. + // +kubebuilder:validation:Required + // +required + // +nullable + Start metav1.Time `json:"time"` + + // Success indicates if the log entry indicates a success or failure. + // +kubebuilder:validation:Required + // +required + Success bool `json:"success"` + + // Reason for status in a machine readable format. + // +optional + Reason string `json:"reason,omitempty"` + + // Message explaining status in a human readable format. + // +optional + Message string `json:"message,omitempty"` + + // Latency records how long the action mentioned in the entry took. + // +optional + // +nullable + Latency metav1.Duration `json:"latency,omitempty"` +} + +// OutageEntry records time period of an outage +type OutageEntry struct { + + // Start of outage detected + // +kubebuilder:validation:Required + // +required + // +nullable + Start metav1.Time `json:"start"` + + // End of outage detected + // +optional + // +nullable + End metav1.Time `json:"end,omitempty"` + + // StartLogs contains log entries related to the start of this outage. Should contain + // the original failure, any entries where the failure mode changed. + // +optional + StartLogs []LogEntry `json:"startLogs,omitempty"` + + // EndLogs contains log entries related to the end of this outage. Should contain the success + // entry that resolved the outage and possibly a few of the failure log entries that preceded it. + // +optional + EndLogs []LogEntry `json:"endLogs,omitempty"` + + // Message summarizes outage details in a human readable format. + // +optional + Message string `json:"message,omitempty"` +} + +// PodNetworkConnectivityCheckCondition represents the overall status of the pod network connectivity. +// +k8s:deepcopy-gen=true +type PodNetworkConnectivityCheckCondition struct { + + // Type of the condition + // +kubebuilder:validation:Required + // +required + Type PodNetworkConnectivityCheckConditionType `json:"type"` + + // Status of the condition + // +kubebuilder:validation:Required + // +required + Status metav1.ConditionStatus `json:"status"` + + // Reason for the condition's last status transition in a machine readable format. + // +optional + Reason string `json:"reason,omitempty"` + + // Message indicating details about last transition in a human readable format. + // +optional + Message string `json:"message,omitempty"` + + // Last time the condition transitioned from one status to another.
+ // +kubebuilder:validation:Required + // +required + // +nullable + LastTransitionTime metav1.Time `json:"lastTransitionTime"` +} + +const ( + LogEntryReasonDNSResolve = "DNSResolve" + LogEntryReasonDNSError = "DNSError" + LogEntryReasonTCPConnect = "TCPConnect" + LogEntryReasonTCPConnectError = "TCPConnectError" +) + +type PodNetworkConnectivityCheckConditionType string + +const ( + // Reachable indicates that the endpoint was reachable from the pod. + Reachable PodNetworkConnectivityCheckConditionType = "Reachable" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodNetworkConnectivityCheckList is a collection of PodNetworkConnectivityCheck +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +type PodNetworkConnectivityCheckList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata"` + + // Items contains the items + Items []PodNetworkConnectivityCheck `json:"items"` +} diff --git a/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..26431d8c1 --- /dev/null +++ b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,199 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LogEntry) DeepCopyInto(out *LogEntry) { + *out = *in + in.Start.DeepCopyInto(&out.Start) + out.Latency = in.Latency + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LogEntry. +func (in *LogEntry) DeepCopy() *LogEntry { + if in == nil { + return nil + } + out := new(LogEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OutageEntry) DeepCopyInto(out *OutageEntry) { + *out = *in + in.Start.DeepCopyInto(&out.Start) + in.End.DeepCopyInto(&out.End) + if in.StartLogs != nil { + in, out := &in.StartLogs, &out.StartLogs + *out = make([]LogEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EndLogs != nil { + in, out := &in.EndLogs, &out.EndLogs + *out = make([]LogEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OutageEntry. +func (in *OutageEntry) DeepCopy() *OutageEntry { + if in == nil { + return nil + } + out := new(OutageEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
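With the group's install.go and these types in place, a client can register the scheme and construct a check programmatically. A minimal sketch: the scheme wiring follows the Install function shown above, while the object name, namespace, and endpoint are illustrative assumptions.

```go
package main

import (
	"fmt"

	"github.com/openshift/api/operatorcontrolplane"
	"github.com/openshift/api/operatorcontrolplane/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Install adds every version of controlplane.operator.openshift.io to the scheme.
	scheme := runtime.NewScheme()
	if err := operatorcontrolplane.Install(scheme); err != nil {
		panic(err)
	}

	// sourcePod and targetEndpoint are the only required spec fields.
	check := &v1alpha1.PodNetworkConnectivityCheck{
		ObjectMeta: metav1.ObjectMeta{Name: "example-check", Namespace: "example-ns"},
		Spec: v1alpha1.PodNetworkConnectivityCheckSpec{
			SourcePod:      "network-check-source",
			TargetEndpoint: "10.0.0.1:6443",
		},
	}

	fmt.Println(check.Name,
		scheme.Recognizes(v1alpha1.GroupVersion.WithKind("PodNetworkConnectivityCheck"))) // example-check true
}
```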
+func (in *PodNetworkConnectivityCheck) DeepCopyInto(out *PodNetworkConnectivityCheck) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodNetworkConnectivityCheck. +func (in *PodNetworkConnectivityCheck) DeepCopy() *PodNetworkConnectivityCheck { + if in == nil { + return nil + } + out := new(PodNetworkConnectivityCheck) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodNetworkConnectivityCheck) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodNetworkConnectivityCheckCondition) DeepCopyInto(out *PodNetworkConnectivityCheckCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodNetworkConnectivityCheckCondition. +func (in *PodNetworkConnectivityCheckCondition) DeepCopy() *PodNetworkConnectivityCheckCondition { + if in == nil { + return nil + } + out := new(PodNetworkConnectivityCheckCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodNetworkConnectivityCheckList) DeepCopyInto(out *PodNetworkConnectivityCheckList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodNetworkConnectivityCheck, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodNetworkConnectivityCheckList. +func (in *PodNetworkConnectivityCheckList) DeepCopy() *PodNetworkConnectivityCheckList { + if in == nil { + return nil + } + out := new(PodNetworkConnectivityCheckList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodNetworkConnectivityCheckList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodNetworkConnectivityCheckSpec) DeepCopyInto(out *PodNetworkConnectivityCheckSpec) { + *out = *in + out.TLSClientCert = in.TLSClientCert + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodNetworkConnectivityCheckSpec. +func (in *PodNetworkConnectivityCheckSpec) DeepCopy() *PodNetworkConnectivityCheckSpec { + if in == nil { + return nil + } + out := new(PodNetworkConnectivityCheckSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
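These generated helpers are what make cache-safe status updates cheap in controller code: objects served from informer caches are shared, so mutations must go through a copy. A short sketch under that assumption, with illustrative condition values:

```go
package checks

import (
	"github.com/openshift/api/operatorcontrolplane/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// markReachable appends a Reachable condition to a deep copy of the cached
// object, leaving the shared cache entry untouched.
func markReachable(cached *v1alpha1.PodNetworkConnectivityCheck) *v1alpha1.PodNetworkConnectivityCheck {
	out := cached.DeepCopy() // copies ObjectMeta plus every Status slice seen above
	out.Status.Conditions = append(out.Status.Conditions, v1alpha1.PodNetworkConnectivityCheckCondition{
		Type:               v1alpha1.Reachable,
		Status:             metav1.ConditionTrue,
		Reason:             v1alpha1.LogEntryReasonTCPConnect,
		Message:            "TCP connection to the target endpoint succeeded",
		LastTransitionTime: metav1.Now(),
	})
	return out
}
```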
+func (in *PodNetworkConnectivityCheckStatus) DeepCopyInto(out *PodNetworkConnectivityCheckStatus) { + *out = *in + if in.Successes != nil { + in, out := &in.Successes, &out.Successes + *out = make([]LogEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Failures != nil { + in, out := &in.Failures, &out.Failures + *out = make([]LogEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Outages != nil { + in, out := &in.Outages, &out.Outages + *out = make([]OutageEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]PodNetworkConnectivityCheckCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodNetworkConnectivityCheckStatus. +func (in *PodNetworkConnectivityCheckStatus) DeepCopy() *PodNetworkConnectivityCheckStatus { + if in == nil { + return nil + } + out := new(PodNetworkConnectivityCheckStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..5ecc5e48a --- /dev/null +++ b/vendor/github.com/openshift/api/operatorcontrolplane/v1alpha1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,95 @@ +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_LogEntry = map[string]string{ + "": "LogEntry records events", + "time": "Start time of check action.", + "success": "Success indicates if the log entry indicates a success or failure.", + "reason": "Reason for status in a machine readable format.", + "message": "Message explaining status in a human readable format.", + "latency": "Latency records how long the action mentioned in the entry took.", +} + +func (LogEntry) SwaggerDoc() map[string]string { + return map_LogEntry +} + +var map_OutageEntry = map[string]string{ + "": "OutageEntry records time period of an outage", + "start": "Start of outage detected", + "end": "End of outage detected", + "startLogs": "StartLogs contains log entries related to the start of this outage. Should contain the original failure, any entries where the failure mode changed.", + "endLogs": "EndLogs contains log entries related to the end of this outage. 
Should contain the success entry that resolved the outage and possibly a few of the failure log entries that preceded it.", + "message": "Message summarizes outage details in a human readable format.", +} + +func (OutageEntry) SwaggerDoc() map[string]string { + return map_OutageEntry +} + +var map_PodNetworkConnectivityCheck = map[string]string{ + "": "PodNetworkConnectivityCheck\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the source and target of the connectivity check", + "status": "Status contains the observed status of the connectivity check", +} + +func (PodNetworkConnectivityCheck) SwaggerDoc() map[string]string { + return map_PodNetworkConnectivityCheck +} + +var map_PodNetworkConnectivityCheckCondition = map[string]string{ + "": "PodNetworkConnectivityCheckCondition represents the overall status of the pod network connectivity.", + "type": "Type of the condition", + "status": "Status of the condition", + "reason": "Reason for the condition's last status transition in a machine readable format.", + "message": "Message indicating details about last transition in a human readable format.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", +} + +func (PodNetworkConnectivityCheckCondition) SwaggerDoc() map[string]string { + return map_PodNetworkConnectivityCheckCondition +} + +var map_PodNetworkConnectivityCheckList = map[string]string{ + "": "PodNetworkConnectivityCheckList is a collection of PodNetworkConnectivityCheck\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items contains the items", +} + +func (PodNetworkConnectivityCheckList) SwaggerDoc() map[string]string { + return map_PodNetworkConnectivityCheckList +} + +var map_PodNetworkConnectivityCheckSpec = map[string]string{ + "sourcePod": "SourcePod names the pod from which the condition will be checked", + "targetEndpoint": "EndpointAddress to check. A TCP address of the form host:port. Note that if host is a DNS name, then the check would fail if the DNS name cannot be resolved. Specify an IP address for host to bypass DNS name lookup.", + "tlsClientCert": "TLSClientCert, if specified, references a kubernetes.io/tls type secret with 'tls.crt' and 'tls.key' entries containing an optional TLS client certificate and key to be used when checking endpoints that require a client certificate in order to gracefully perform the scan without causing excessive logging in the endpoint process.
The secret must exist in the same namespace as this resource.", +} + +func (PodNetworkConnectivityCheckSpec) SwaggerDoc() map[string]string { + return map_PodNetworkConnectivityCheckSpec +} + +var map_PodNetworkConnectivityCheckStatus = map[string]string{ + "successes": "Successes contains logs of successful check actions", + "failures": "Failures contains logs of unsuccessful check actions", + "outages": "Outages contains logs of time periods of outages", + "conditions": "Conditions summarize the status of the check", +} + +func (PodNetworkConnectivityCheckStatus) SwaggerDoc() map[string]string { + return map_PodNetworkConnectivityCheckStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/osin/install.go b/vendor/github.com/openshift/api/osin/install.go new file mode 100644 index 000000000..3f773985b --- /dev/null +++ b/vendor/github.com/openshift/api/osin/install.go @@ -0,0 +1,26 @@ +package osin + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + osinv1 "github.com/openshift/api/osin/v1" +) + +const ( + GroupName = "osin.config.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(osinv1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/osin/v1/doc.go b/vendor/github.com/openshift/api/osin/v1/doc.go new file mode 100644 index 000000000..b74dfc48a --- /dev/null +++ b/vendor/github.com/openshift/api/osin/v1/doc.go @@ -0,0 +1,7 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=osin.config.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/osin/v1/register.go b/vendor/github.com/openshift/api/osin/v1/register.go new file mode 100644 index 000000000..4d54a5df4 --- /dev/null +++ b/vendor/github.com/openshift/api/osin/v1/register.go @@ -0,0 +1,50 @@ +package v1 + +import ( + configv1 "github.com/openshift/api/config/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "osin.config.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, configv1.Install) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme.
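As with the operatorcontrolplane group, the top-level osin package exposes Install plus Resource and Kind helpers that qualify names with the group. A minimal sketch of how they compose; the resource and kind strings are illustrative, and the printed forms in the comments are what apimachinery's String methods would produce:

```go
package main

import (
	"fmt"

	"github.com/openshift/api/osin"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	if err := osin.Install(scheme); err != nil { // registers every version of osin.config.openshift.io
		panic(err)
	}

	fmt.Println(osin.Resource("osinserverconfigs")) // osinserverconfigs.osin.config.openshift.io
	fmt.Println(osin.Kind("OsinServerConfig"))      // OsinServerConfig.osin.config.openshift.io
}
```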
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &OsinServerConfig{}, + + &BasicAuthPasswordIdentityProvider{}, + &AllowAllPasswordIdentityProvider{}, + &DenyAllPasswordIdentityProvider{}, + &HTPasswdPasswordIdentityProvider{}, + &LDAPPasswordIdentityProvider{}, + &KeystonePasswordIdentityProvider{}, + &RequestHeaderIdentityProvider{}, + &GitHubIdentityProvider{}, + &GitLabIdentityProvider{}, + &GoogleIdentityProvider{}, + &OpenIDIdentityProvider{}, + + &SessionSecrets{}, + ) + return nil +} diff --git a/vendor/github.com/openshift/api/osin/v1/types.go b/vendor/github.com/openshift/api/osin/v1/types.go new file mode 100644 index 000000000..0ea4be1ba --- /dev/null +++ b/vendor/github.com/openshift/api/osin/v1/types.go @@ -0,0 +1,488 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + configv1 "github.com/openshift/api/config/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type OsinServerConfig struct { + metav1.TypeMeta `json:",inline"` + + // provides the standard apiserver configuration + configv1.GenericAPIServerConfig `json:",inline"` + + // oauthConfig holds the necessary configuration options for OAuth authentication + OAuthConfig OAuthConfig `json:"oauthConfig"` +} + +// OAuthConfig holds the necessary configuration options for OAuth authentication +type OAuthConfig struct { + // masterCA is the CA for verifying the TLS connection back to the MasterURL. + // This field is deprecated and will be removed in a future release. + // See loginURL for details. + // Deprecated + MasterCA *string `json:"masterCA"` + + // masterURL is used for making server-to-server calls to exchange authorization codes for access tokens + // This field is deprecated and will be removed in a future release. + // See loginURL for details. + // Deprecated + MasterURL string `json:"masterURL"` + + // masterPublicURL is used for building valid client redirect URLs for internal and external access + // This field is deprecated and will be removed in a future release. + // See loginURL for details. + // Deprecated + MasterPublicURL string `json:"masterPublicURL"` + + // loginURL, along with masterCA, masterURL and masterPublicURL have distinct + // meanings depending on how the OAuth server is run. The two states are: + // 1. embedded in the kube api server (all 3.x releases) + // 2. as a standalone external process (all 4.x releases) + // in the embedded configuration, loginURL is equivalent to masterPublicURL + // and the other fields have functionality that matches their docs. 
+ // in the standalone configuration, the fields are used as: + // loginURL is the URL required to login to the cluster: + // oc login --server=<loginURL> + // masterPublicURL is the issuer URL + // it is accessible from inside (service network) and outside (ingress) of the cluster + // masterURL is the loopback variation of the token_endpoint URL with no path component + // it is only accessible from inside (service network) of the cluster + // masterCA is used to perform TLS verification for connections made to masterURL + // For further details, see the IETF Draft: + // https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2 + LoginURL string `json:"loginURL"` + + // assetPublicURL is used for building valid client redirect URLs for external access + AssetPublicURL string `json:"assetPublicURL"` + + // alwaysShowProviderSelection will force the provider selection page to render even when there is only a single provider. + AlwaysShowProviderSelection bool `json:"alwaysShowProviderSelection"` + + // identityProviders is an ordered list of ways for a user to identify themselves + IdentityProviders []IdentityProvider `json:"identityProviders"` + + // grantConfig describes how to handle grants + GrantConfig GrantConfig `json:"grantConfig"` + + // sessionConfig holds information about configuring sessions. + SessionConfig *SessionConfig `json:"sessionConfig"` + + // tokenConfig contains options for authorization and access tokens + TokenConfig TokenConfig `json:"tokenConfig"` + + // templates allow you to customize pages like the login page. + Templates *OAuthTemplates `json:"templates"` +} + +// OAuthTemplates allow for customization of pages like the login page +type OAuthTemplates struct { + // login is a path to a file containing a go template used to render the login page. + // If unspecified, the default login page is used. + Login string `json:"login"` + + // providerSelection is a path to a file containing a go template used to render the provider selection page. + // If unspecified, the default provider selection page is used. + ProviderSelection string `json:"providerSelection"` + + // error is a path to a file containing a go template used to render error pages during the authentication or grant flow + // If unspecified, the default error page is used. + Error string `json:"error"` +} + +// IdentityProvider provides identities for users authenticating using credentials +type IdentityProvider struct { + // name is used to qualify the identities returned by this provider + Name string `json:"name"` + // challenge indicates whether to issue WWW-Authenticate challenges for this provider + UseAsChallenger bool `json:"challenge"` + // login indicates whether to use this identity provider for unauthenticated browsers to login against + UseAsLogin bool `json:"login"` + // mappingMethod determines how identities from this provider are mapped to users + MappingMethod string `json:"mappingMethod"` + // provider contains the information about how to set up a specific identity provider + // +kubebuilder:pruning:PreserveUnknownFields + Provider runtime.RawExtension `json:"provider"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BasicAuthPasswordIdentityProvider provides identities for users authenticating using HTTP basic auth credentials +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type BasicAuthPasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // RemoteConnectionInfo contains information about how to connect to the external basic auth server + configv1.RemoteConnectionInfo `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AllowAllPasswordIdentityProvider provides identities for users authenticating using non-empty passwords +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type AllowAllPasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DenyAllPasswordIdentityProvider provides no identities for users +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type DenyAllPasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HTPasswdPasswordIdentityProvider provides identities for users authenticating using htpasswd credentials +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type HTPasswdPasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // file is a reference to your htpasswd file + File string `json:"file"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LDAPPasswordIdentityProvider provides identities for users authenticating using LDAP credentials +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type LDAPPasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + // url is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is + // ldap://host:port/basedn?attribute?scope?filter + URL string `json:"url"` + // bindDN is an optional DN to bind with during the search phase. + BindDN string `json:"bindDN"` + // bindPassword is an optional password to bind with during the search phase. + BindPassword configv1.StringSource `json:"bindPassword"` + + // insecure, if true, indicates the connection should not use TLS. 
+ // Cannot be set to true with a URL scheme of "ldaps://" + // If false, "ldaps://" URLs connect using TLS, and "ldap://" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830 + Insecure bool `json:"insecure"` + // ca is the optional trusted certificate authority bundle to use when making requests to the server + // If empty, the default system roots are used + CA string `json:"ca"` + // attributes maps LDAP attributes to identities + Attributes LDAPAttributeMapping `json:"attributes"` +} + +// LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields +type LDAPAttributeMapping struct { + // id is the list of attributes whose values should be used as the user ID. Required. + // LDAP standard identity attribute is "dn" + ID []string `json:"id"` + // preferredUsername is the list of attributes whose values should be used as the preferred username. + // LDAP standard login attribute is "uid" + PreferredUsername []string `json:"preferredUsername"` + // name is the list of attributes whose values should be used as the display name. Optional. + // If unspecified, no display name is set for the identity + // LDAP standard display name attribute is "cn" + Name []string `json:"name"` + // email is the list of attributes whose values should be used as the email address. Optional. + // If unspecified, no email is set for the identity + Email []string `json:"email"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// KeystonePasswordIdentityProvider provides identities for users authenticating using keystone password credentials +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type KeystonePasswordIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + // RemoteConnectionInfo contains information about how to connect to the keystone server + configv1.RemoteConnectionInfo `json:",inline"` + // domainName is required for keystone v3 + DomainName string `json:"domainName"` + // useKeystoneIdentity flag indicates that user should be authenticated by keystone ID, not by username + UseKeystoneIdentity bool `json:"useKeystoneIdentity"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. 
+// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type RequestHeaderIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // loginURL is a URL to redirect unauthenticated /authorize requests to + // Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here + // ${url} is replaced with the current URL, escaped to be safe in a query parameter + // https://www.example.com/sso-login?then=${url} + // ${query} is replaced with the current query string + // https://www.example.com/auth-proxy/oauth/authorize?${query} + LoginURL string `json:"loginURL"` + + // challengeURL is a URL to redirect unauthenticated /authorize requests to + // Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here + // ${url} is replaced with the current URL, escaped to be safe in a query parameter + // https://www.example.com/sso-login?then=${url} + // ${query} is replaced with the current query string + // https://www.example.com/auth-proxy/oauth/authorize?${query} + ChallengeURL string `json:"challengeURL"` + + // clientCA is a file with the trusted signer certs. If empty, no request verification is done, and any direct request to the OAuth server can impersonate any identity from this provider, merely by setting a request header. + ClientCA string `json:"clientCA"` + // clientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative. + ClientCommonNames []string `json:"clientCommonNames"` + + // headers is the set of headers to check for identity information + Headers []string `json:"headers"` + // preferredUsernameHeaders is the set of headers to check for the preferred username + PreferredUsernameHeaders []string `json:"preferredUsernameHeaders"` + // nameHeaders is the set of headers to check for the display name + NameHeaders []string `json:"nameHeaders"` + // emailHeaders is the set of headers to check for the email address + EmailHeaders []string `json:"emailHeaders"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// GitHubIdentityProvider provides identities for users authenticating using GitHub credentials +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type GitHubIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // clientID is the oauth client ID + ClientID string `json:"clientID"` + // clientSecret is the oauth client secret + ClientSecret configv1.StringSource `json:"clientSecret"` + // organizations optionally restricts which organizations are allowed to log in + Organizations []string `json:"organizations"` + // teams optionally restricts which teams are allowed to log in. Format is <org>/<team>. + Teams []string `json:"teams"` + // hostname is the optional domain (e.g. "mycompany.com") for use with a hosted instance of GitHub Enterprise. + // It must match the GitHub Enterprise settings value that is configured at /setup/settings#hostname. + Hostname string `json:"hostname"` + // ca is the optional trusted certificate authority bundle to use when making requests to the server. + // If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value.
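Because IdentityProvider.Provider is a runtime.RawExtension, a concrete provider such as GitHubIdentityProvider is embedded as an object (or raw bytes) rather than as a typed field. A hedged sketch of that wiring; the provider name, mapping method, and GitHub values are illustrative:

```go
package osinexample

import (
	osinv1 "github.com/openshift/api/osin/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// githubProvider embeds a concrete provider into the generic IdentityProvider
// envelope via runtime.RawExtension; all field values here are illustrative.
func githubProvider() osinv1.IdentityProvider {
	return osinv1.IdentityProvider{
		Name:          "github",
		UseAsLogin:    true,
		MappingMethod: "claim", // illustrative mapping-method string
		Provider: runtime.RawExtension{
			Object: &osinv1.GitHubIdentityProvider{
				ClientID:      "example-client-id",
				Organizations: []string{"example-org"},
				Teams:         []string{"example-org/admins"},
			},
		},
	}
}
```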
+ CA string `json:"ca"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// GitLabIdentityProvider provides identities for users authenticating using GitLab credentials +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type GitLabIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // ca is the optional trusted certificate authority bundle to use when making requests to the server + // If empty, the default system roots are used + CA string `json:"ca"` + // url is the oauth server base URL + URL string `json:"url"` + // clientID is the oauth client ID + ClientID string `json:"clientID"` + // clientSecret is the oauth client secret + ClientSecret configv1.StringSource `json:"clientSecret"` + // legacy determines if OAuth2 or OIDC should be used + // If true, OAuth2 is used + // If false, OIDC is used + // If nil and the URL's host is gitlab.com, OIDC is used + // Otherwise, OAuth2 is used + // In a future release, nil will default to using OIDC + // Eventually this flag will be removed and only OIDC will be used + Legacy *bool `json:"legacy,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// GoogleIdentityProvider provides identities for users authenticating using Google credentials +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type GoogleIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // clientID is the oauth client ID + ClientID string `json:"clientID"` + // clientSecret is the oauth client secret + ClientSecret configv1.StringSource `json:"clientSecret"` + + // hostedDomain is the optional Google App domain (e.g. "mycompany.com") to restrict logins to + HostedDomain string `json:"hostedDomain"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type OpenIDIdentityProvider struct { + metav1.TypeMeta `json:",inline"` + + // ca is the optional trusted certificate authority bundle to use when making requests to the server + // If empty, the default system roots are used + CA string `json:"ca"` + + // clientID is the oauth client ID + ClientID string `json:"clientID"` + // clientSecret is the oauth client secret + ClientSecret configv1.StringSource `json:"clientSecret"` + + // extraScopes are any scopes to request in addition to the standard "openid" scope. + ExtraScopes []string `json:"extraScopes"` + + // extraAuthorizeParameters are any custom parameters to add to the authorize request. 
+ ExtraAuthorizeParameters map[string]string `json:"extraAuthorizeParameters"` + + // urls to use to authenticate + URLs OpenIDURLs `json:"urls"` + + // claims mappings + Claims OpenIDClaims `json:"claims"` +} + +// OpenIDURLs are URLs to use when authenticating with an OpenID identity provider +type OpenIDURLs struct { + // authorize is the oauth authorization URL + Authorize string `json:"authorize"` + // token is the oauth token granting URL + Token string `json:"token"` + // userInfo is the optional userinfo URL. + // If present, a granted access_token is used to request claims + // If empty, a granted id_token is parsed for claims + UserInfo string `json:"userInfo"` +} + +// OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider +type OpenIDClaims struct { + // id is the list of claims whose values should be used as the user ID. Required. + // OpenID standard identity claim is "sub" + ID []string `json:"id"` + // preferredUsername is the list of claims whose values should be used as the preferred username. + // If unspecified, the preferred username is determined from the value of the id claim + PreferredUsername []string `json:"preferredUsername"` + // name is the list of claims whose values should be used as the display name. Optional. + // If unspecified, no display name is set for the identity + Name []string `json:"name"` + // email is the list of claims whose values should be used as the email address. Optional. + // If unspecified, no email is set for the identity + Email []string `json:"email"` + // groups is the list of claims value of which should be used to synchronize groups + // from the OIDC provider to OpenShift for the user + Groups []string `json:"groups"` +} + +// GrantConfig holds the necessary configuration options for grant handlers +type GrantConfig struct { + // method determines the default strategy to use when an OAuth client requests a grant. + // This method will be used only if the specific OAuth client doesn't provide a strategy + // of their own. Valid grant handling methods are: + // - auto: always approves grant requests, useful for trusted clients + // - prompt: prompts the end user for approval of grant requests, useful for third-party clients + // - deny: always denies grant requests, useful for black-listed clients + Method GrantHandlerType `json:"method"` + + // serviceAccountMethod is used for determining client authorization for service account oauth client. + // It must be either: deny, prompt + ServiceAccountMethod GrantHandlerType `json:"serviceAccountMethod"` +} + +type GrantHandlerType string + +const ( + // auto auto-approves client authorization grant requests + GrantHandlerAuto GrantHandlerType = "auto" + // prompt prompts the user to approve new client authorization grant requests + GrantHandlerPrompt GrantHandlerType = "prompt" + // deny auto-denies client authorization grant requests + GrantHandlerDeny GrantHandlerType = "deny" +) + +// SessionConfig specifies options for cookie-based sessions. Used by AuthRequestHandlerSession +type SessionConfig struct { + // sessionSecretsFile is a reference to a file containing a serialized SessionSecrets object + // If no file is specified, a random signing and encryption key are generated at each server start + SessionSecretsFile string `json:"sessionSecretsFile"` + // sessionMaxAgeSeconds specifies how long created sessions last. 
Used by AuthRequestHandlerSession + SessionMaxAgeSeconds int32 `json:"sessionMaxAgeSeconds"` + // sessionName is the cookie name used to store the session + SessionName string `json:"sessionName"` +} + +// TokenConfig holds the necessary configuration options for authorization and access tokens +type TokenConfig struct { + // authorizeTokenMaxAgeSeconds defines the maximum age of authorize tokens + AuthorizeTokenMaxAgeSeconds int32 `json:"authorizeTokenMaxAgeSeconds,omitempty"` + // accessTokenMaxAgeSeconds defines the maximum age of access tokens + AccessTokenMaxAgeSeconds int32 `json:"accessTokenMaxAgeSeconds,omitempty"` + // accessTokenInactivityTimeoutSeconds - DEPRECATED: setting this field has no effect. + // +optional + AccessTokenInactivityTimeoutSeconds *int32 `json:"accessTokenInactivityTimeoutSeconds,omitempty"` + // accessTokenInactivityTimeout defines the token inactivity timeout + // for tokens granted by any client. + // The value represents the maximum amount of time that can occur between + // consecutive uses of the token. Tokens become invalid if they are not + // used within this temporal window. The user will need to acquire a new + // token to regain access once a token times out. Takes valid time + // duration string such as "5m", "1.5h" or "2h45m". The minimum allowed + // value for duration is 300s (5 minutes). If the timeout is configured + // per client, then that value takes precedence. If the timeout value is + // not specified and the client does not override the value, then tokens + // are valid until their lifetime. + // +optional + AccessTokenInactivityTimeout *metav1.Duration `json:"accessTokenInactivityTimeout,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SessionSecrets list the secrets to use to sign/encrypt and authenticate/decrypt created sessions. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type SessionSecrets struct { + metav1.TypeMeta `json:",inline"` + + // Secrets is a list of secrets + // New sessions are signed and encrypted using the first secret. + // Existing sessions are decrypted/authenticated by each secret until one succeeds. This allows rotating secrets. + Secrets []SessionSecret `json:"secrets"` +} + +// SessionSecret is a secret used to authenticate/decrypt cookie-based sessions +type SessionSecret struct { + // Authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes. + Authentication string `json:"authentication"` + // Encryption is used to encrypt sessions. Must be 16, 24, or 32 characters long, to select AES-128, AES-192, or AES-256. + Encryption string `json:"encryption"` +} diff --git a/vendor/github.com/openshift/api/osin/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/osin/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..cb90b8365 --- /dev/null +++ b/vendor/github.com/openshift/api/osin/v1/zz_generated.deepcopy.go @@ -0,0 +1,645 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) +
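Editor's note: two of the osin/v1 fields introduced above have semantics that are easy to misread, so here are two small, hypothetical sketches (useOIDC and effectiveInactivityTimeout are invented names; nothing below is part of the patch). First, the legacy flag on GitLabIdentityProvider:

package main

import "fmt"

// useOIDC restates the documented GitLabIdentityProvider.Legacy semantics:
// explicit true selects OAuth2, explicit false selects OIDC, and nil selects
// OIDC only when the server host is gitlab.com.
func useOIDC(legacy *bool, host string) bool {
	if legacy != nil {
		return !*legacy // legacy=true means OAuth2, i.e. not OIDC
	}
	return host == "gitlab.com"
}

func main() {
	t := true
	fmt.Println(useOIDC(&t, "gitlab.com"))          // false: explicit legacy OAuth2
	fmt.Println(useOIDC(nil, "gitlab.com"))         // true: nil defaults to OIDC here
	fmt.Println(useOIDC(nil, "gitlab.example.com")) // false: nil elsewhere means OAuth2
}

Second, the precedence and the 300s floor documented on TokenConfig.AccessTokenInactivityTimeout:

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const minInactivityTimeout = 300 * time.Second // documented minimum of 5 minutes

// effectiveInactivityTimeout applies the documented rules: a per-client value
// takes precedence over the global TokenConfig value, nil means tokens are
// simply valid until their normal lifetime, and values below 300s are invalid.
func effectiveInactivityTimeout(global, perClient *metav1.Duration) (time.Duration, error) {
	chosen := global
	if perClient != nil {
		chosen = perClient
	}
	if chosen == nil {
		return 0, nil
	}
	if chosen.Duration < minInactivityTimeout {
		return 0, fmt.Errorf("inactivity timeout %s is below the 300s minimum", chosen.Duration)
	}
	return chosen.Duration, nil
}

func main() {
	global := &metav1.Duration{Duration: 10 * time.Minute}
	fmt.Println(effectiveInactivityTimeout(global, nil))
	fmt.Println(effectiveInactivityTimeout(global, &metav1.Duration{Duration: 2 * time.Minute}))
}

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.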
+func (in *AllowAllPasswordIdentityProvider) DeepCopyInto(out *AllowAllPasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowAllPasswordIdentityProvider. +func (in *AllowAllPasswordIdentityProvider) DeepCopy() *AllowAllPasswordIdentityProvider { + if in == nil { + return nil + } + out := new(AllowAllPasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AllowAllPasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BasicAuthPasswordIdentityProvider) DeepCopyInto(out *BasicAuthPasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.RemoteConnectionInfo = in.RemoteConnectionInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BasicAuthPasswordIdentityProvider. +func (in *BasicAuthPasswordIdentityProvider) DeepCopy() *BasicAuthPasswordIdentityProvider { + if in == nil { + return nil + } + out := new(BasicAuthPasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BasicAuthPasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DenyAllPasswordIdentityProvider) DeepCopyInto(out *DenyAllPasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DenyAllPasswordIdentityProvider. +func (in *DenyAllPasswordIdentityProvider) DeepCopy() *DenyAllPasswordIdentityProvider { + if in == nil { + return nil + } + out := new(DenyAllPasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DenyAllPasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitHubIdentityProvider) DeepCopyInto(out *GitHubIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ClientSecret = in.ClientSecret + if in.Organizations != nil { + in, out := &in.Organizations, &out.Organizations + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Teams != nil { + in, out := &in.Teams, &out.Teams + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitHubIdentityProvider. +func (in *GitHubIdentityProvider) DeepCopy() *GitHubIdentityProvider { + if in == nil { + return nil + } + out := new(GitHubIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
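// Editor's note (not part of the vendored file): the generated functions in
// this file lean on two idioms worth knowing when reviewing them. Slices of
// strings are deep-copied with make+copy, which is safe because Go strings
// are immutable:
//     *out = make([]string, len(*in))
//     copy(*out, *in)
// and pointer fields (for example GitLabIdentityProvider.Legacy below) are
// deep-copied by allocating a new value and copying through the pointers:
//     *out = new(bool)
//     **out = **in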
+func (in *GitHubIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitLabIdentityProvider) DeepCopyInto(out *GitLabIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ClientSecret = in.ClientSecret + if in.Legacy != nil { + in, out := &in.Legacy, &out.Legacy + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitLabIdentityProvider. +func (in *GitLabIdentityProvider) DeepCopy() *GitLabIdentityProvider { + if in == nil { + return nil + } + out := new(GitLabIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GitLabIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GoogleIdentityProvider) DeepCopyInto(out *GoogleIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ClientSecret = in.ClientSecret + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GoogleIdentityProvider. +func (in *GoogleIdentityProvider) DeepCopy() *GoogleIdentityProvider { + if in == nil { + return nil + } + out := new(GoogleIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GoogleIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GrantConfig) DeepCopyInto(out *GrantConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GrantConfig. +func (in *GrantConfig) DeepCopy() *GrantConfig { + if in == nil { + return nil + } + out := new(GrantConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTPasswdPasswordIdentityProvider) DeepCopyInto(out *HTPasswdPasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTPasswdPasswordIdentityProvider. +func (in *HTPasswdPasswordIdentityProvider) DeepCopy() *HTPasswdPasswordIdentityProvider { + if in == nil { + return nil + } + out := new(HTPasswdPasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HTPasswdPasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IdentityProvider) DeepCopyInto(out *IdentityProvider) { + *out = *in + in.Provider.DeepCopyInto(&out.Provider) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityProvider. +func (in *IdentityProvider) DeepCopy() *IdentityProvider { + if in == nil { + return nil + } + out := new(IdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeystonePasswordIdentityProvider) DeepCopyInto(out *KeystonePasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.RemoteConnectionInfo = in.RemoteConnectionInfo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeystonePasswordIdentityProvider. +func (in *KeystonePasswordIdentityProvider) DeepCopy() *KeystonePasswordIdentityProvider { + if in == nil { + return nil + } + out := new(KeystonePasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *KeystonePasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LDAPAttributeMapping) DeepCopyInto(out *LDAPAttributeMapping) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreferredUsername != nil { + in, out := &in.PreferredUsername, &out.PreferredUsername + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPAttributeMapping. +func (in *LDAPAttributeMapping) DeepCopy() *LDAPAttributeMapping { + if in == nil { + return nil + } + out := new(LDAPAttributeMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LDAPPasswordIdentityProvider) DeepCopyInto(out *LDAPPasswordIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.BindPassword = in.BindPassword + in.Attributes.DeepCopyInto(&out.Attributes) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LDAPPasswordIdentityProvider. +func (in *LDAPPasswordIdentityProvider) DeepCopy() *LDAPPasswordIdentityProvider { + if in == nil { + return nil + } + out := new(LDAPPasswordIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LDAPPasswordIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OAuthConfig) DeepCopyInto(out *OAuthConfig) { + *out = *in + if in.MasterCA != nil { + in, out := &in.MasterCA, &out.MasterCA + *out = new(string) + **out = **in + } + if in.IdentityProviders != nil { + in, out := &in.IdentityProviders, &out.IdentityProviders + *out = make([]IdentityProvider, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.GrantConfig = in.GrantConfig + if in.SessionConfig != nil { + in, out := &in.SessionConfig, &out.SessionConfig + *out = new(SessionConfig) + **out = **in + } + in.TokenConfig.DeepCopyInto(&out.TokenConfig) + if in.Templates != nil { + in, out := &in.Templates, &out.Templates + *out = new(OAuthTemplates) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthConfig. +func (in *OAuthConfig) DeepCopy() *OAuthConfig { + if in == nil { + return nil + } + out := new(OAuthConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OAuthTemplates) DeepCopyInto(out *OAuthTemplates) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OAuthTemplates. +func (in *OAuthTemplates) DeepCopy() *OAuthTemplates { + if in == nil { + return nil + } + out := new(OAuthTemplates) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDClaims) DeepCopyInto(out *OpenIDClaims) { + *out = *in + if in.ID != nil { + in, out := &in.ID, &out.ID + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreferredUsername != nil { + in, out := &in.PreferredUsername, &out.PreferredUsername + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Name != nil { + in, out := &in.Name, &out.Name + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Email != nil { + in, out := &in.Email, &out.Email + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDClaims. +func (in *OpenIDClaims) DeepCopy() *OpenIDClaims { + if in == nil { + return nil + } + out := new(OpenIDClaims) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDIdentityProvider) DeepCopyInto(out *OpenIDIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ClientSecret = in.ClientSecret + if in.ExtraScopes != nil { + in, out := &in.ExtraScopes, &out.ExtraScopes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExtraAuthorizeParameters != nil { + in, out := &in.ExtraAuthorizeParameters, &out.ExtraAuthorizeParameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.URLs = in.URLs + in.Claims.DeepCopyInto(&out.Claims) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDIdentityProvider. 
+func (in *OpenIDIdentityProvider) DeepCopy() *OpenIDIdentityProvider { + if in == nil { + return nil + } + out := new(OpenIDIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OpenIDIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OpenIDURLs) DeepCopyInto(out *OpenIDURLs) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OpenIDURLs. +func (in *OpenIDURLs) DeepCopy() *OpenIDURLs { + if in == nil { + return nil + } + out := new(OpenIDURLs) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OsinServerConfig) DeepCopyInto(out *OsinServerConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.GenericAPIServerConfig.DeepCopyInto(&out.GenericAPIServerConfig) + in.OAuthConfig.DeepCopyInto(&out.OAuthConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OsinServerConfig. +func (in *OsinServerConfig) DeepCopy() *OsinServerConfig { + if in == nil { + return nil + } + out := new(OsinServerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *OsinServerConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequestHeaderIdentityProvider) DeepCopyInto(out *RequestHeaderIdentityProvider) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.ClientCommonNames != nil { + in, out := &in.ClientCommonNames, &out.ClientCommonNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.PreferredUsernameHeaders != nil { + in, out := &in.PreferredUsernameHeaders, &out.PreferredUsernameHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NameHeaders != nil { + in, out := &in.NameHeaders, &out.NameHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.EmailHeaders != nil { + in, out := &in.EmailHeaders, &out.EmailHeaders + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestHeaderIdentityProvider. +func (in *RequestHeaderIdentityProvider) DeepCopy() *RequestHeaderIdentityProvider { + if in == nil { + return nil + } + out := new(RequestHeaderIdentityProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RequestHeaderIdentityProvider) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SessionConfig) DeepCopyInto(out *SessionConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionConfig. +func (in *SessionConfig) DeepCopy() *SessionConfig { + if in == nil { + return nil + } + out := new(SessionConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SessionSecret) DeepCopyInto(out *SessionSecret) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionSecret. +func (in *SessionSecret) DeepCopy() *SessionSecret { + if in == nil { + return nil + } + out := new(SessionSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SessionSecrets) DeepCopyInto(out *SessionSecrets) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SessionSecret, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionSecrets. +func (in *SessionSecrets) DeepCopy() *SessionSecrets { + if in == nil { + return nil + } + out := new(SessionSecrets) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SessionSecrets) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenConfig) DeepCopyInto(out *TokenConfig) { + *out = *in + if in.AccessTokenInactivityTimeoutSeconds != nil { + in, out := &in.AccessTokenInactivityTimeoutSeconds, &out.AccessTokenInactivityTimeoutSeconds + *out = new(int32) + **out = **in + } + if in.AccessTokenInactivityTimeout != nil { + in, out := &in.AccessTokenInactivityTimeout, &out.AccessTokenInactivityTimeout + *out = new(metav1.Duration) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenConfig. +func (in *TokenConfig) DeepCopy() *TokenConfig { + if in == nil { + return nil + } + out := new(TokenConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/osin/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/osin/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..0bffa8265 --- /dev/null +++ b/vendor/github.com/openshift/api/osin/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,280 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
+// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_AllowAllPasswordIdentityProvider = map[string]string{ + "": "AllowAllPasswordIdentityProvider provides identities for users authenticating using non-empty passwords\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", +} + +func (AllowAllPasswordIdentityProvider) SwaggerDoc() map[string]string { + return map_AllowAllPasswordIdentityProvider +} + +var map_BasicAuthPasswordIdentityProvider = map[string]string{ + "": "BasicAuthPasswordIdentityProvider provides identities for users authenticating using HTTP basic auth credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", +} + +func (BasicAuthPasswordIdentityProvider) SwaggerDoc() map[string]string { + return map_BasicAuthPasswordIdentityProvider +} + +var map_DenyAllPasswordIdentityProvider = map[string]string{ + "": "DenyAllPasswordIdentityProvider provides no identities for users\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", +} + +func (DenyAllPasswordIdentityProvider) SwaggerDoc() map[string]string { + return map_DenyAllPasswordIdentityProvider +} + +var map_GitHubIdentityProvider = map[string]string{ + "": "GitHubIdentityProvider provides identities for users authenticating using GitHub credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is the oauth client secret", + "organizations": "organizations optionally restricts which organizations are allowed to log in", + "teams": "teams optionally restricts which teams are allowed to log in. Format is <org>/<team>.", + "hostname": "hostname is the optional domain (e.g. \"mycompany.com\") for use with a hosted instance of GitHub Enterprise. It must match the GitHub Enterprise settings value that is configured at /setup/settings#hostname.", + "ca": "ca is the optional trusted certificate authority bundle to use when making requests to the server. If empty, the default system roots are used. This can only be configured when hostname is set to a non-empty value.", +} + +func (GitHubIdentityProvider) SwaggerDoc() map[string]string { + return map_GitHubIdentityProvider +} + +var map_GitLabIdentityProvider = map[string]string{ + "": "GitLabIdentityProvider provides identities for users authenticating using GitLab credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason.
These capabilities should not be used by applications needing long term support.", + "ca": "ca is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", + "url": "url is the oauth server base URL", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is the oauth client secret", + "legacy": "legacy determines if OAuth2 or OIDC should be used If true, OAuth2 is used If false, OIDC is used If nil and the URL's host is gitlab.com, OIDC is used Otherwise, OAuth2 is used In a future release, nil will default to using OIDC Eventually this flag will be removed and only OIDC will be used", +} + +func (GitLabIdentityProvider) SwaggerDoc() map[string]string { + return map_GitLabIdentityProvider +} + +var map_GoogleIdentityProvider = map[string]string{ + "": "GoogleIdentityProvider provides identities for users authenticating using Google credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is the oauth client secret", + "hostedDomain": "hostedDomain is the optional Google App domain (e.g. \"mycompany.com\") to restrict logins to", +} + +func (GoogleIdentityProvider) SwaggerDoc() map[string]string { + return map_GoogleIdentityProvider +} + +var map_GrantConfig = map[string]string{ + "": "GrantConfig holds the necessary configuration options for grant handlers", + "method": "method determines the default strategy to use when an OAuth client requests a grant. This method will be used only if the specific OAuth client doesn't provide a strategy of their own. Valid grant handling methods are:\n - auto: always approves grant requests, useful for trusted clients\n - prompt: prompts the end user for approval of grant requests, useful for third-party clients\n - deny: always denies grant requests, useful for black-listed clients", + "serviceAccountMethod": "serviceAccountMethod is used for determining client authorization for service account oauth client. It must be either: deny, prompt", +} + +func (GrantConfig) SwaggerDoc() map[string]string { + return map_GrantConfig +} + +var map_HTPasswdPasswordIdentityProvider = map[string]string{ + "": "HTPasswdPasswordIdentityProvider provides identities for users authenticating using htpasswd credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. 
These capabilities should not be used by applications needing long term support.", + "file": "file is a reference to your htpasswd file", +} + +func (HTPasswdPasswordIdentityProvider) SwaggerDoc() map[string]string { + return map_HTPasswdPasswordIdentityProvider +} + +var map_IdentityProvider = map[string]string{ + "": "IdentityProvider provides identities for users authenticating using credentials", + "name": "name is used to qualify the identities returned by this provider", + "challenge": "challenge indicates whether to issue WWW-Authenticate challenges for this provider", + "login": "login indicates whether to use this identity provider for unauthenticated browsers to login against", + "mappingMethod": "mappingMethod determines how identities from this provider are mapped to users", + "provider": "provider contains the information about how to set up a specific identity provider", +} + +func (IdentityProvider) SwaggerDoc() map[string]string { + return map_IdentityProvider +} + +var map_KeystonePasswordIdentityProvider = map[string]string{ + "": "KeystonePasswordIdentityProvider provides identities for users authenticating using keystone password credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "domainName": "domainName is required for keystone v3", + "useKeystoneIdentity": "useKeystoneIdentity flag indicates that user should be authenticated by keystone ID, not by username", +} + +func (KeystonePasswordIdentityProvider) SwaggerDoc() map[string]string { + return map_KeystonePasswordIdentityProvider +} + +var map_LDAPAttributeMapping = map[string]string{ + "": "LDAPAttributeMapping maps LDAP attributes to OpenShift identity fields", + "id": "id is the list of attributes whose values should be used as the user ID. Required. LDAP standard identity attribute is \"dn\"", + "preferredUsername": "preferredUsername is the list of attributes whose values should be used as the preferred username. LDAP standard login attribute is \"uid\"", + "name": "name is the list of attributes whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity LDAP standard display name attribute is \"cn\"", + "email": "email is the list of attributes whose values should be used as the email address. Optional. If unspecified, no email is set for the identity", +} + +func (LDAPAttributeMapping) SwaggerDoc() map[string]string { + return map_LDAPAttributeMapping +} + +var map_LDAPPasswordIdentityProvider = map[string]string{ + "": "LDAPPasswordIdentityProvider provides identities for users authenticating using LDAP credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "url": "url is an RFC 2255 URL which specifies the LDAP search parameters to use. The syntax of the URL is\n ldap://host:port/basedn?attribute?scope?filter", + "bindDN": "bindDN is an optional DN to bind with during the search phase.", + "bindPassword": "bindPassword is an optional password to bind with during the search phase.", + "insecure": "insecure, if true, indicates the connection should not use TLS. 
Cannot be set to true with a URL scheme of \"ldaps://\" If false, \"ldaps://\" URLs connect using TLS, and \"ldap://\" URLs are upgraded to a TLS connection using StartTLS as specified in https://tools.ietf.org/html/rfc2830", + "ca": "ca is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", + "attributes": "attributes maps LDAP attributes to identities", +} + +func (LDAPPasswordIdentityProvider) SwaggerDoc() map[string]string { + return map_LDAPPasswordIdentityProvider +} + +var map_OAuthConfig = map[string]string{ + "": "OAuthConfig holds the necessary configuration options for OAuth authentication", + "masterCA": "masterCA is the CA for verifying the TLS connection back to the MasterURL. This field is deprecated and will be removed in a future release. See loginURL for details. Deprecated", + "masterURL": "masterURL is used for making server-to-server calls to exchange authorization codes for access tokens This field is deprecated and will be removed in a future release. See loginURL for details. Deprecated", + "masterPublicURL": "masterPublicURL is used for building valid client redirect URLs for internal and external access This field is deprecated and will be removed in a future release. See loginURL for details. Deprecated", + "loginURL": "loginURL, along with masterCA, masterURL and masterPublicURL have distinct meanings depending on how the OAuth server is run. The two states are: 1. embedded in the kube api server (all 3.x releases) 2. as a standalone external process (all 4.x releases) in the embedded configuration, loginURL is equivalent to masterPublicURL and the other fields have functionality that matches their docs. in the standalone configuration, the fields are used as: loginURL is the URL required to login to the cluster: oc login --server=<loginURL> masterPublicURL is the issuer URL it is accessible from inside (service network) and outside (ingress) of the cluster masterURL is the loopback variation of the token_endpoint URL with no path component it is only accessible from inside (service network) of the cluster masterCA is used to perform TLS verification for connections made to masterURL For further details, see the IETF Draft: https://tools.ietf.org/html/draft-ietf-oauth-discovery-04#section-2", + "assetPublicURL": "assetPublicURL is used for building valid client redirect URLs for external access", + "alwaysShowProviderSelection": "alwaysShowProviderSelection will force the provider selection page to render even when there is only a single provider.", + "identityProviders": "identityProviders is an ordered list of ways for a user to identify themselves", + "grantConfig": "grantConfig describes how to handle grants", + "sessionConfig": "sessionConfig hold information about configuring sessions.", + "tokenConfig": "tokenConfig contains options for authorization and access tokens", + "templates": "templates allow you to customize pages like the login page.", +} + +func (OAuthConfig) SwaggerDoc() map[string]string { + return map_OAuthConfig +} + +var map_OAuthTemplates = map[string]string{ + "": "OAuthTemplates allow for customization of pages like the login page", + "login": "login is a path to a file containing a go template used to render the login page. If unspecified, the default login page is used.", + "providerSelection": "providerSelection is a path to a file containing a go template used to render the provider selection page.
If unspecified, the default provider selection page is used.", + "error": "error is a path to a file containing a go template used to render error pages during the authentication or grant flow If unspecified, the default error page is used.", +} + +func (OAuthTemplates) SwaggerDoc() map[string]string { + return map_OAuthTemplates +} + +var map_OpenIDClaims = map[string]string{ + "": "OpenIDClaims contains a list of OpenID claims to use when authenticating with an OpenID identity provider", + "id": "id is the list of claims whose values should be used as the user ID. Required. OpenID standard identity claim is \"sub\"", + "preferredUsername": "preferredUsername is the list of claims whose values should be used as the preferred username. If unspecified, the preferred username is determined from the value of the id claim", + "name": "name is the list of claims whose values should be used as the display name. Optional. If unspecified, no display name is set for the identity", + "email": "email is the list of claims whose values should be used as the email address. Optional. If unspecified, no email is set for the identity", + "groups": "groups is the list of claims value of which should be used to synchronize groups from the OIDC provider to OpenShift for the user", +} + +func (OpenIDClaims) SwaggerDoc() map[string]string { + return map_OpenIDClaims +} + +var map_OpenIDIdentityProvider = map[string]string{ + "": "OpenIDIdentityProvider provides identities for users authenticating using OpenID credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "ca": "ca is the optional trusted certificate authority bundle to use when making requests to the server If empty, the default system roots are used", + "clientID": "clientID is the oauth client ID", + "clientSecret": "clientSecret is the oauth client secret", + "extraScopes": "extraScopes are any scopes to request in addition to the standard \"openid\" scope.", + "extraAuthorizeParameters": "extraAuthorizeParameters are any custom parameters to add to the authorize request.", + "urls": "urls to use to authenticate", + "claims": "claims mappings", +} + +func (OpenIDIdentityProvider) SwaggerDoc() map[string]string { + return map_OpenIDIdentityProvider +} + +var map_OpenIDURLs = map[string]string{ + "": "OpenIDURLs are URLs to use when authenticating with an OpenID identity provider", + "authorize": "authorize is the oauth authorization URL", + "token": "token is the oauth token granting URL", + "userInfo": "userInfo is the optional userinfo URL. If present, a granted access_token is used to request claims If empty, a granted id_token is parsed for claims", +} + +func (OpenIDURLs) SwaggerDoc() map[string]string { + return map_OpenIDURLs +} + +var map_OsinServerConfig = map[string]string{ + "": "Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. 
These capabilities should not be used by applications needing long term support.", + "oauthConfig": "oauthConfig holds the necessary configuration options for OAuth authentication", +} + +func (OsinServerConfig) SwaggerDoc() map[string]string { + return map_OsinServerConfig +} + +var map_RequestHeaderIdentityProvider = map[string]string{ + "": "RequestHeaderIdentityProvider provides identities for users authenticating using request header credentials\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "loginURL": "loginURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect interactive logins will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}", + "challengeURL": "challengeURL is a URL to redirect unauthenticated /authorize requests to Unauthenticated requests from OAuth clients which expect WWW-Authenticate challenges will be redirected here ${url} is replaced with the current URL, escaped to be safe in a query parameter\n https://www.example.com/sso-login?then=${url}\n${query} is replaced with the current query string\n https://www.example.com/auth-proxy/oauth/authorize?${query}", + "clientCA": "clientCA is a file with the trusted signer certs. If empty, no request verification is done, and any direct request to the OAuth server can impersonate any identity from this provider, merely by setting a request header.", + "clientCommonNames": "clientCommonNames is an optional list of common names to require a match from. If empty, any client certificate validated against the clientCA bundle is considered authoritative.", + "headers": "headers is the set of headers to check for identity information", + "preferredUsernameHeaders": "preferredUsernameHeaders is the set of headers to check for the preferred username", + "nameHeaders": "nameHeaders is the set of headers to check for the display name", + "emailHeaders": "emailHeaders is the set of headers to check for the email address", +} + +func (RequestHeaderIdentityProvider) SwaggerDoc() map[string]string { + return map_RequestHeaderIdentityProvider +} + +var map_SessionConfig = map[string]string{ + "": "SessionConfig specifies options for cookie-based sessions. Used by AuthRequestHandlerSession", + "sessionSecretsFile": "sessionSecretsFile is a reference to a file containing a serialized SessionSecrets object If no file is specified, a random signing and encryption key are generated at each server start", + "sessionMaxAgeSeconds": "sessionMaxAgeSeconds specifies how long created sessions last. Used by AuthRequestHandlerSession", + "sessionName": "sessionName is the cookie name used to store the session", +} + +func (SessionConfig) SwaggerDoc() map[string]string { + return map_SessionConfig +} + +var map_SessionSecret = map[string]string{ + "": "SessionSecret is a secret used to authenticate/decrypt cookie-based sessions", + "authentication": "Authentication is used to authenticate sessions using HMAC. Recommended to use a secret with 32 or 64 bytes.", + "encryption": "Encryption is used to encrypt sessions. 
Must be 16, 24, or 32 characters long, to select AES-128, AES-192, or AES-256.", +} + +func (SessionSecret) SwaggerDoc() map[string]string { + return map_SessionSecret +} + +var map_SessionSecrets = map[string]string{ + "": "SessionSecrets list the secrets to use to sign/encrypt and authenticate/decrypt created sessions.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "secrets": "Secrets is a list of secrets New sessions are signed and encrypted using the first secret. Existing sessions are decrypted/authenticated by each secret until one succeeds. This allows rotating secrets.", +} + +func (SessionSecrets) SwaggerDoc() map[string]string { + return map_SessionSecrets +} + +var map_TokenConfig = map[string]string{ + "": "TokenConfig holds the necessary configuration options for authorization and access tokens", + "authorizeTokenMaxAgeSeconds": "authorizeTokenMaxAgeSeconds defines the maximum age of authorize tokens", + "accessTokenMaxAgeSeconds": "accessTokenMaxAgeSeconds defines the maximum age of access tokens", + "accessTokenInactivityTimeoutSeconds": "accessTokenInactivityTimeoutSeconds - DEPRECATED: setting this field has no effect.", + "accessTokenInactivityTimeout": "accessTokenInactivityTimeout defines the token inactivity timeout for tokens granted by any client. The value represents the maximum amount of time that can occur between consecutive uses of the token. Tokens become invalid if they are not used within this temporal window. The user will need to acquire a new token to regain access once a token times out. Takes valid time duration string such as \"5m\", \"1.5h\" or \"2h45m\". The minimum allowed value for duration is 300s (5 minutes). If the timeout is configured per client, then that value takes precedence. If the timeout value is not specified and the client does not override the value, then tokens are valid until their lifetime.", +} + +func (TokenConfig) SwaggerDoc() map[string]string { + return map_TokenConfig +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/pkg/serialization/serialization.go b/vendor/github.com/openshift/api/pkg/serialization/serialization.go new file mode 100644 index 000000000..70c8e7a99 --- /dev/null +++ b/vendor/github.com/openshift/api/pkg/serialization/serialization.go @@ -0,0 +1,45 @@ +package serialization + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// DecodeNestedRawExtensionOrUnknown +func DecodeNestedRawExtensionOrUnknown(d runtime.Decoder, ext *runtime.RawExtension) { + if ext.Raw == nil || ext.Object != nil { + return + } + obj, gvk, err := d.Decode(ext.Raw, nil, nil) + if err != nil { + unk := &runtime.Unknown{Raw: ext.Raw} + if runtime.IsNotRegisteredError(err) { + if _, gvk, err := d.Decode(ext.Raw, nil, unk); err == nil { + unk.APIVersion = gvk.GroupVersion().String() + unk.Kind = gvk.Kind + ext.Object = unk + return + } + } + // TODO: record mime-type with the object + if gvk != nil { + unk.APIVersion = gvk.GroupVersion().String() + unk.Kind = gvk.Kind + } + obj = unk + } + ext.Object = obj +} +
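Editor's note: a hypothetical usage sketch for DecodeNestedRawExtensionOrUnknown above; the JSON payload and the deliberately empty scheme are invented for illustration, and the point is only the runtime.Unknown fallback for unregistered kinds:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer/json"

	"github.com/openshift/api/pkg/serialization"
)

func main() {
	// An empty scheme: the kind below is intentionally not registered.
	scheme := runtime.NewScheme()
	codec := json.NewSerializer(json.DefaultMetaFactory, scheme, scheme, false)

	ext := &runtime.RawExtension{Raw: []byte(`{"apiVersion":"v1","kind":"Widget"}`)}
	serialization.DecodeNestedRawExtensionOrUnknown(codec, ext)

	// Unregistered kinds are preserved as runtime.Unknown rather than dropped.
	if unk, ok := ext.Object.(*runtime.Unknown); ok {
		fmt.Println(unk.APIVersion, unk.Kind) // v1 Widget
	}
}

+// EncodeNestedRawExtension will encode the object in the RawExtension (if not nil) or +// return an error.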
+func EncodeNestedRawExtension(e runtime.Encoder, ext *runtime.RawExtension) error { + if ext.Raw != nil || ext.Object == nil { + return nil + } + data, err := runtime.Encode(e, ext.Object) + if err != nil { + return err + } + ext.Raw = data + return nil +} diff --git a/vendor/github.com/openshift/api/project/OWNERS b/vendor/github.com/openshift/api/project/OWNERS new file mode 100644 index 000000000..9b1548f56 --- /dev/null +++ b/vendor/github.com/openshift/api/project/OWNERS @@ -0,0 +1,2 @@ +reviewers: + - mfojtik diff --git a/vendor/github.com/openshift/api/project/install.go b/vendor/github.com/openshift/api/project/install.go new file mode 100644 index 000000000..c96c7aa26 --- /dev/null +++ b/vendor/github.com/openshift/api/project/install.go @@ -0,0 +1,26 @@ +package project + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + projectv1 "github.com/openshift/api/project/v1" +) + +const ( + GroupName = "project.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(projectv1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/project/v1/doc.go b/vendor/github.com/openshift/api/project/v1/doc.go new file mode 100644 index 000000000..5bbd9d5ea --- /dev/null +++ b/vendor/github.com/openshift/api/project/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/project/apis/project +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=project.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/project/v1/generated.pb.go b/vendor/github.com/openshift/api/project/v1/generated.pb.go new file mode 100644 index 000000000..822dbbc30 --- /dev/null +++ b/vendor/github.com/openshift/api/project/v1/generated.pb.go @@ -0,0 +1,1305 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/project/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
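Editor's note: a brief aside on the generated wire format below (standard protobuf facts, not specific to this patch): the byte constants 0xa, 0x12 and 0x1a written by the Marshal functions are field tags computed as (field_number << 3) | wire_type, where wire type 2 means length-delimited, and the length prefixes are written by encodeVarintGenerated as base-128 varints.

package main

import "fmt"

func main() {
	// Tags for length-delimited (wire type 2) fields 1, 2 and 3,
	// matching the 0xa, 0x12 and 0x1a constants in the generated code.
	for field := uint64(1); field <= 3; field++ {
		fmt.Printf("field %d: %#x\n", field, field<<3|2)
	}
}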
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *Project) Reset() { *m = Project{} } +func (*Project) ProtoMessage() {} +func (*Project) Descriptor() ([]byte, []int) { + return fileDescriptor_fbf46eaac05029bf, []int{0} +} +func (m *Project) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Project) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Project) XXX_Merge(src proto.Message) { + xxx_messageInfo_Project.Merge(m, src) +} +func (m *Project) XXX_Size() int { + return m.Size() +} +func (m *Project) XXX_DiscardUnknown() { + xxx_messageInfo_Project.DiscardUnknown(m) +} + +var xxx_messageInfo_Project proto.InternalMessageInfo + +func (m *ProjectList) Reset() { *m = ProjectList{} } +func (*ProjectList) ProtoMessage() {} +func (*ProjectList) Descriptor() ([]byte, []int) { + return fileDescriptor_fbf46eaac05029bf, []int{1} +} +func (m *ProjectList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProjectList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ProjectList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectList.Merge(m, src) +} +func (m *ProjectList) XXX_Size() int { + return m.Size() +} +func (m *ProjectList) XXX_DiscardUnknown() { + xxx_messageInfo_ProjectList.DiscardUnknown(m) +} + +var xxx_messageInfo_ProjectList proto.InternalMessageInfo + +func (m *ProjectRequest) Reset() { *m = ProjectRequest{} } +func (*ProjectRequest) ProtoMessage() {} +func (*ProjectRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_fbf46eaac05029bf, []int{2} +} +func (m *ProjectRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProjectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ProjectRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectRequest.Merge(m, src) +} +func (m *ProjectRequest) XXX_Size() int { + return m.Size() +} +func (m *ProjectRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ProjectRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ProjectRequest proto.InternalMessageInfo + +func (m *ProjectSpec) Reset() { *m = ProjectSpec{} } +func (*ProjectSpec) ProtoMessage() {} +func (*ProjectSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_fbf46eaac05029bf, []int{3} +} +func (m *ProjectSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProjectSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ProjectSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectSpec.Merge(m, src) +} +func (m *ProjectSpec) XXX_Size() int { + return m.Size() +} +func (m *ProjectSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ProjectSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ProjectSpec proto.InternalMessageInfo + +func (m *ProjectStatus) Reset() { *m = ProjectStatus{} } +func (*ProjectStatus) ProtoMessage() {} +func (*ProjectStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_fbf46eaac05029bf, []int{4} +} +func (m *ProjectStatus) XXX_Unmarshal(b []byte) error { + 
return m.Unmarshal(b) +} +func (m *ProjectStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ProjectStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectStatus.Merge(m, src) +} +func (m *ProjectStatus) XXX_Size() int { + return m.Size() +} +func (m *ProjectStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ProjectStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ProjectStatus proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Project)(nil), "github.com.openshift.api.project.v1.Project") + proto.RegisterType((*ProjectList)(nil), "github.com.openshift.api.project.v1.ProjectList") + proto.RegisterType((*ProjectRequest)(nil), "github.com.openshift.api.project.v1.ProjectRequest") + proto.RegisterType((*ProjectSpec)(nil), "github.com.openshift.api.project.v1.ProjectSpec") + proto.RegisterType((*ProjectStatus)(nil), "github.com.openshift.api.project.v1.ProjectStatus") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/project/v1/generated.proto", fileDescriptor_fbf46eaac05029bf) +} + +var fileDescriptor_fbf46eaac05029bf = []byte{ + // 573 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0x4d, 0x6f, 0xd3, 0x30, + 0x18, 0xc7, 0x9b, 0x6d, 0x1d, 0xab, 0xcb, 0x26, 0x14, 0x2e, 0x55, 0x0f, 0x69, 0xc9, 0x24, 0xd4, + 0x03, 0x38, 0xb4, 0xbc, 0x88, 0x73, 0x40, 0x88, 0x49, 0xbc, 0x0c, 0x73, 0xab, 0x38, 0xe0, 0xa6, + 0x6e, 0x6a, 0xba, 0xc4, 0x26, 0x76, 0x2b, 0x8d, 0x13, 0x1f, 0x81, 0x3b, 0x9f, 0x83, 0x2b, 0xe7, + 0x1e, 0x77, 0xdc, 0xa9, 0x5a, 0xc3, 0xb7, 0xd8, 0x09, 0xd9, 0x71, 0x93, 0xc0, 0x8a, 0xd4, 0x5d, + 0xb8, 0xd5, 0x4f, 0xfe, 0xbf, 0x9f, 0xed, 0xe7, 0x49, 0x03, 0x1e, 0x86, 0x54, 0x8e, 0xa7, 0x03, + 0x18, 0xb0, 0xc8, 0x63, 0x9c, 0xc4, 0x62, 0x4c, 0x47, 0xd2, 0xc3, 0x9c, 0x7a, 0x3c, 0x61, 0x9f, + 0x48, 0x20, 0xbd, 0x59, 0xd7, 0x0b, 0x49, 0x4c, 0x12, 0x2c, 0xc9, 0x10, 0xf2, 0x84, 0x49, 0x66, + 0x1f, 0x16, 0x10, 0xcc, 0x21, 0x88, 0x39, 0x85, 0x06, 0x82, 0xb3, 0x6e, 0xf3, 0x7e, 0xc9, 0x1c, + 0xb2, 0x90, 0x79, 0x9a, 0x1d, 0x4c, 0x47, 0x7a, 0xa5, 0x17, 0xfa, 0x57, 0xe6, 0x6c, 0xba, 0x93, + 0xa7, 0x02, 0x52, 0xa6, 0xb7, 0x0e, 0x58, 0x42, 0xd6, 0xec, 0xdb, 0x7c, 0x54, 0x64, 0x22, 0x1c, + 0x8c, 0x69, 0x4c, 0x92, 0x53, 0x8f, 0x4f, 0x42, 0x55, 0x10, 0x5e, 0x44, 0x24, 0x5e, 0x47, 0x3d, + 0xf9, 0x17, 0x95, 0x4c, 0x63, 0x49, 0x23, 0xe2, 0x89, 0x60, 0x4c, 0x22, 0xfc, 0x37, 0xe7, 0x7e, + 0xdf, 0x02, 0x37, 0x8e, 0xb3, 0xfb, 0xd8, 0x1f, 0xc1, 0x9e, 0xd2, 0x0f, 0xb1, 0xc4, 0x0d, 0xab, + 0x6d, 0x75, 0xea, 0xbd, 0x07, 0x30, 0xd3, 0xc2, 0xb2, 0x16, 0xf2, 0x49, 0xa8, 0x0a, 0x02, 0xaa, + 0x34, 0x9c, 0x75, 0xe1, 0xdb, 0x81, 0xe2, 0x5f, 0x13, 0x89, 0x7d, 0x7b, 0xbe, 0x68, 0x55, 0xd2, + 0x45, 0x0b, 0x14, 0x35, 0x94, 0x5b, 0x6d, 0x04, 0x76, 0x04, 0x27, 0x41, 0x63, 0xcb, 0xd8, 0x37, + 0x68, 0x31, 0x34, 0xa7, 0x7b, 0xcf, 0x49, 0xe0, 0xdf, 0x34, 0xf6, 0x1d, 0xb5, 0x42, 0xda, 0x65, + 0xf7, 0xc1, 0xae, 0x90, 0x58, 0x4e, 0x45, 0x63, 0x5b, 0x5b, 0x7b, 0xd7, 0xb2, 0x6a, 0xd2, 0x3f, + 0x30, 0xde, 0xdd, 0x6c, 0x8d, 0x8c, 0xd1, 0xfd, 0x69, 0x81, 0xba, 0x49, 0xbe, 0xa2, 0x42, 0xda, + 0x1f, 0xae, 0x74, 0x08, 0x6e, 0xd6, 0x21, 0x45, 0xeb, 0xfe, 0xdc, 0x32, 0x3b, 0xed, 0xad, 0x2a, + 0xa5, 0xee, 0xbc, 0x03, 0x55, 0x2a, 0x49, 0x24, 0x1a, 0x5b, 0xed, 0xed, 0x4e, 0xbd, 0x77, 0xef, + 0x3a, 0x17, 0xf1, 0xf7, 0x8d, 0xb8, 0x7a, 0xa4, 0x14, 0x28, 0x33, 0xb9, 0x17, 0x16, 0x38, 0x30, + 0x09, 0x44, 0x3e, 0x4f, 
0x89, 0xf8, 0x1f, 0x53, 0x7e, 0x0c, 0xea, 0x43, 0x2a, 0xf8, 0x09, 0x3e, + 0x7d, 0x83, 0x23, 0xa2, 0x87, 0x5d, 0xf3, 0x6f, 0x1b, 0xa4, 0xfe, 0xbc, 0x78, 0x84, 0xca, 0x39, + 0x8d, 0x11, 0x11, 0x24, 0x94, 0x4b, 0xca, 0x62, 0x3d, 0xcd, 0x32, 0x56, 0x3c, 0x42, 0xe5, 0x9c, + 0x8b, 0xf3, 0x11, 0xa9, 0x97, 0xc2, 0x46, 0x00, 0x8c, 0x68, 0x8c, 0x4f, 0xe8, 0x17, 0x92, 0x88, + 0x86, 0xd5, 0xde, 0xee, 0xd4, 0xfc, 0x9e, 0x3a, 0xea, 0x8b, 0xbc, 0x7a, 0xb9, 0x68, 0xb5, 0xaf, + 0xfe, 0x11, 0x61, 0x1e, 0xd0, 0x47, 0x2b, 0x59, 0xdc, 0x1f, 0x16, 0xd8, 0xff, 0xe3, 0x85, 0xb1, + 0x5f, 0x82, 0x2a, 0x1f, 0x63, 0x41, 0x74, 0x07, 0x6b, 0x7e, 0x6f, 0xd5, 0xfc, 0x63, 0x55, 0xbc, + 0x5c, 0xb4, 0xee, 0xac, 0xf1, 0x2b, 0xad, 0xe0, 0x38, 0x20, 0x3a, 0x84, 0x32, 0x81, 0xdd, 0x07, + 0x20, 0x60, 0xf1, 0x90, 0xaa, 0xbb, 0xac, 0x26, 0x7f, 0xb7, 0x34, 0x10, 0xa8, 0x70, 0x58, 0xc6, + 0x9f, 0xad, 0xe2, 0xc5, 0x18, 0xf2, 0x92, 0x40, 0x25, 0x9b, 0x7f, 0x34, 0x5f, 0x3a, 0x95, 0xb3, + 0xa5, 0x53, 0x39, 0x5f, 0x3a, 0x95, 0xaf, 0xa9, 0x63, 0xcd, 0x53, 0xc7, 0x3a, 0x4b, 0x1d, 0xeb, + 0x3c, 0x75, 0xac, 0x8b, 0xd4, 0xb1, 0xbe, 0xfd, 0x72, 0x2a, 0xfd, 0xc3, 0x0d, 0xbe, 0x8e, 0xbf, + 0x03, 0x00, 0x00, 0xff, 0xff, 0xb3, 0x9b, 0x1f, 0xba, 0x43, 0x05, 0x00, 0x00, +} + +func (m *Project) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Project) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Project) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ProjectList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProjectList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProjectList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ProjectRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProjectRequest) MarshalTo(dAtA []byte) (int, error) { + 
size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProjectRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x1a + i -= len(m.DisplayName) + copy(dAtA[i:], m.DisplayName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DisplayName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ProjectSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProjectSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProjectSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Finalizers) > 0 { + for iNdEx := len(m.Finalizers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Finalizers[iNdEx]) + copy(dAtA[i:], m.Finalizers[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Finalizers[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ProjectStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProjectStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProjectStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Project) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ProjectList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ProjectRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DisplayName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ProjectSpec) Size() (n int) { + 
if m == nil { + return 0 + } + var l int + _ = l + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ProjectStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Project) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Project{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ProjectSpec", "ProjectSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ProjectStatus", "ProjectStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ProjectList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Project{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Project", "Project", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ProjectList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ProjectRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProjectRequest{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DisplayName:` + fmt.Sprintf("%v", this.DisplayName) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `}`, + }, "") + return s +} +func (this *ProjectSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProjectSpec{`, + `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, + `}`, + }, "") + return s +} +func (this *ProjectStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]NamespaceCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += fmt.Sprintf("%v", f) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ProjectStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Project) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
Project: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Project: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProjectList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Project{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProjectRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DisplayName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DisplayName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProjectSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Finalizers = append(m.Finalizers, k8s_io_api_core_v1.FinalizerName(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProjectStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = k8s_io_api_core_v1.NamespacePhase(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, v11.NamespaceCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/openshift/api/project/v1/generated.proto b/vendor/github.com/openshift/api/project/v1/generated.proto new file mode 100644 index 000000000..c86bd8039 --- 
/dev/null +++ b/vendor/github.com/openshift/api/project/v1/generated.proto @@ -0,0 +1,90 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package github.com.openshift.api.project.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/openshift/api/project/v1"; + +// Projects are the unit of isolation and collaboration in OpenShift. A project has one or more members, +// a quota on the resources that the project may consume, and the security controls on the resources in +// the project. Within a project, members may have different roles - project administrators can set +// membership, editors can create and manage the resources, and viewers can see but not access running +// containers. In a normal cluster project administrators are not able to alter their quotas - that is +// restricted to cluster administrators. +// +// Listing or watching projects will return only projects the user has the reader role on. +// +// An OpenShift project is an alternative representation of a Kubernetes namespace. Projects are exposed +// as editable to end users while namespaces are not. Direct creation of a project is typically restricted +// to administrators, while end users should use the requestproject resource. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message Project { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the behavior of the Namespace. + optional ProjectSpec spec = 2; + + // Status describes the current status of a Namespace + // +optional + optional ProjectStatus status = 3; +} + +// ProjectList is a list of Project objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ProjectList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of projects + repeated Project items = 2; +} + +// ProjectRequest is the set of options necessary to fully qualify a project request +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ProjectRequest { + // metadata is the standard object's metadata. 
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // DisplayName is the display name to apply to a project
+  optional string displayName = 2;
+
+  // Description is the description to apply to a project
+  optional string description = 3;
+}
+
+// ProjectSpec describes the attributes on a Project
+message ProjectSpec {
+  // Finalizers is an opaque list of values that must be empty to permanently remove object from storage
+  repeated string finalizers = 1;
+}
+
+// ProjectStatus is information about the current status of a Project
+message ProjectStatus {
+  // Phase is the current lifecycle phase of the project
+  // +optional
+  optional string phase = 1;
+
+  // Represents the latest available observations of the project current state.
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  repeated k8s.io.api.core.v1.NamespaceCondition conditions = 2;
+}
+
diff --git a/vendor/github.com/openshift/api/project/v1/legacy.go b/vendor/github.com/openshift/api/project/v1/legacy.go
new file mode 100644
index 000000000..186f905f3
--- /dev/null
+++ b/vendor/github.com/openshift/api/project/v1/legacy.go
@@ -0,0 +1,23 @@
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+	legacyGroupVersion            = schema.GroupVersion{Group: "", Version: "v1"}
+	legacySchemeBuilder           = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme)
+	DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme
+)
+
+func addLegacyKnownTypes(scheme *runtime.Scheme) error {
+	types := []runtime.Object{
+		&Project{},
+		&ProjectList{},
+		&ProjectRequest{},
+	}
+	scheme.AddKnownTypes(legacyGroupVersion, types...)
+	return nil
+}
diff --git a/vendor/github.com/openshift/api/project/v1/register.go b/vendor/github.com/openshift/api/project/v1/register.go
new file mode 100644
index 000000000..e471716ce
--- /dev/null
+++ b/vendor/github.com/openshift/api/project/v1/register.go
@@ -0,0 +1,40 @@
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+	GroupName     = "project.openshift.io"
+	GroupVersion  = schema.GroupVersion{Group: GroupName, Version: "v1"}
+	schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme)
+	// Install is a function which adds this version to a scheme
+	Install = schemeBuilder.AddToScheme
+
+	// SchemeGroupVersion generated code relies on this name
+	// Deprecated
+	SchemeGroupVersion = GroupVersion
+	// AddToScheme exists solely to keep the old generators creating valid code
+	// DEPRECATED
+	AddToScheme = schemeBuilder.AddToScheme
+)
+
+// Resource generated code relies on this being here, but it logically belongs to the group
+// DEPRECATED
+func Resource(resource string) schema.GroupResource {
+	return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+// Adds the list of known types to api.Scheme.
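The Install variable wired up above is the hook consumers use to make these kinds known to apimachinery. As a minimal sketch of that wiring (the main package scaffolding and printed output here are illustrative, not part of this patch):

package main

import (
	"fmt"

	projectv1 "github.com/openshift/api/project/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Register project.openshift.io/v1 (plus the core v1 types the
	// schemeBuilder chains in) into a fresh scheme.
	scheme := runtime.NewScheme()
	if err := projectv1.Install(scheme); err != nil {
		panic(err)
	}

	// The scheme can now resolve the GroupVersionKind for a Project.
	gvks, _, err := scheme.ObjectKinds(&projectv1.Project{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks[0]) // project.openshift.io/v1, Kind=Project
}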
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(GroupVersion,
+		&Project{},
+		&ProjectList{},
+		&ProjectRequest{},
+	)
+	metav1.AddToGroupVersion(scheme, GroupVersion)
+	return nil
+}
diff --git a/vendor/github.com/openshift/api/project/v1/types.go b/vendor/github.com/openshift/api/project/v1/types.go
new file mode 100644
index 000000000..9c17a5dea
--- /dev/null
+++ b/vendor/github.com/openshift/api/project/v1/types.go
@@ -0,0 +1,111 @@
+package v1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ProjectList is a list of Project objects.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type ProjectList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of projects
+	Items []Project `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+const (
+	// These are internal finalizer values to Origin
+	FinalizerOrigin corev1.FinalizerName = "openshift.io/origin"
+	// ProjectNodeSelector is an annotation that holds the node selector;
+	// the node selector annotation determines which nodes will have pods from this project scheduled to them
+	ProjectNodeSelector = "openshift.io/node-selector"
+
+	// ProjectRequesterAnnotation is the username that requested a given project. Its not guaranteed to be present,
+	// but it is set by the default project template.
+	ProjectRequesterAnnotation = "openshift.io/requester"
+)
+
+// ProjectSpec describes the attributes on a Project
+type ProjectSpec struct {
+	// Finalizers is an opaque list of values that must be empty to permanently remove object from storage
+	Finalizers []corev1.FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=k8s.io/api/core/v1.FinalizerName"`
+}
+
+// ProjectStatus is information about the current status of a Project
+type ProjectStatus struct {
+	// Phase is the current lifecycle phase of the project
+	// +optional
+	Phase corev1.NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=k8s.io/api/core/v1.NamespacePhase"`
+
+	// Represents the latest available observations of the project current state.
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []corev1.NamespaceCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Projects are the unit of isolation and collaboration in OpenShift. A project has one or more members,
+// a quota on the resources that the project may consume, and the security controls on the resources in
+// the project. Within a project, members may have different roles - project administrators can set
+// membership, editors can create and manage the resources, and viewers can see but not access running
+// containers. In a normal cluster project administrators are not able to alter their quotas - that is
+// restricted to cluster administrators.
+// +// Listing or watching projects will return only projects the user has the reader role on. +// +// An OpenShift project is an alternative representation of a Kubernetes namespace. Projects are exposed +// as editable to end users while namespaces are not. Direct creation of a project is typically restricted +// to administrators, while end users should use the requestproject resource. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Project struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the behavior of the Namespace. + Spec ProjectSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status describes the current status of a Namespace + // +optional + Status ProjectStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +genclient +// +genclient:nonNamespaced +// +genclient:skipVerbs=get,list,create,update,patch,delete,deleteCollection,watch +// +genclient:method=Create,verb=create,result=Project + +// ProjectRequest is the set of options necessary to fully qualify a project request +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ProjectRequest struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // DisplayName is the display name to apply to a project + DisplayName string `json:"displayName,omitempty" protobuf:"bytes,2,opt,name=displayName"` + // Description is the description to apply to a project + Description string `json:"description,omitempty" protobuf:"bytes,3,opt,name=description"` +} diff --git a/vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..ddbdda971 --- /dev/null +++ b/vendor/github.com/openshift/api/project/v1/zz_generated.deepcopy.go @@ -0,0 +1,142 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Project) DeepCopyInto(out *Project) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Project. +func (in *Project) DeepCopy() *Project { + if in == nil { + return nil + } + out := new(Project) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Project) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectList) DeepCopyInto(out *ProjectList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Project, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectList. +func (in *ProjectList) DeepCopy() *ProjectList { + if in == nil { + return nil + } + out := new(ProjectList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectRequest) DeepCopyInto(out *ProjectRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectRequest. +func (in *ProjectRequest) DeepCopy() *ProjectRequest { + if in == nil { + return nil + } + out := new(ProjectRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProjectRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectSpec) DeepCopyInto(out *ProjectSpec) { + *out = *in + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]corev1.FinalizerName, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectSpec. +func (in *ProjectSpec) DeepCopy() *ProjectSpec { + if in == nil { + return nil + } + out := new(ProjectSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectStatus) DeepCopyInto(out *ProjectStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]corev1.NamespaceCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectStatus. 
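The generated DeepCopyInto/DeepCopy/DeepCopyObject trios above are what let these types satisfy runtime.Object and be shared safely through informer caches. A small sketch of the property they guarantee, using types from this patch (the main package scaffolding and the "example.io/changed" value are illustrative):

package main

import (
	"fmt"

	projectv1 "github.com/openshift/api/project/v1"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	orig := &projectv1.ProjectSpec{
		Finalizers: []corev1.FinalizerName{projectv1.FinalizerOrigin},
	}

	// DeepCopy clones the Finalizers slice, so mutating the copy
	// cannot corrupt the original object.
	cp := orig.DeepCopy()
	cp.Finalizers[0] = "example.io/changed"

	fmt.Println(orig.Finalizers[0]) // openshift.io/origin
	fmt.Println(cp.Finalizers[0])   // example.io/changed
}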
+func (in *ProjectStatus) DeepCopy() *ProjectStatus { + if in == nil { + return nil + } + out := new(ProjectStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..890e651d7 --- /dev/null +++ b/vendor/github.com/openshift/api/project/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,65 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Project = map[string]string{ + "": "Projects are the unit of isolation and collaboration in OpenShift. A project has one or more members, a quota on the resources that the project may consume, and the security controls on the resources in the project. Within a project, members may have different roles - project administrators can set membership, editors can create and manage the resources, and viewers can see but not access running containers. In a normal cluster project administrators are not able to alter their quotas - that is restricted to cluster administrators.\n\nListing or watching projects will return only projects the user has the reader role on.\n\nAn OpenShift project is an alternative representation of a Kubernetes namespace. Projects are exposed as editable to end users while namespaces are not. Direct creation of a project is typically restricted to administrators, while end users should use the requestproject resource.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the behavior of the Namespace.", + "status": "Status describes the current status of a Namespace", +} + +func (Project) SwaggerDoc() map[string]string { + return map_Project +} + +var map_ProjectList = map[string]string{ + "": "ProjectList is a list of Project objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is the list of projects", +} + +func (ProjectList) SwaggerDoc() map[string]string { + return map_ProjectList +} + +var map_ProjectRequest = map[string]string{ + "": "ProjectRequest is the set of options necessary to fully qualify a project request\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "displayName": "DisplayName is the display name to apply to a project", + "description": "Description is the description to apply to a project", +} + +func (ProjectRequest) SwaggerDoc() map[string]string { + return map_ProjectRequest +} + +var map_ProjectSpec = map[string]string{ + "": "ProjectSpec describes the attributes on a Project", + "finalizers": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage", +} + +func (ProjectSpec) SwaggerDoc() map[string]string { + return map_ProjectSpec +} + +var map_ProjectStatus = map[string]string{ + "": "ProjectStatus is information about the current status of a Project", + "phase": "Phase is the current lifecycle phase of the project", + "conditions": "Represents the latest available observations of the project current state.", +} + +func (ProjectStatus) SwaggerDoc() map[string]string { + return map_ProjectStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/quota/OWNERS b/vendor/github.com/openshift/api/quota/OWNERS new file mode 100644 index 000000000..75dbd7b56 --- /dev/null +++ b/vendor/github.com/openshift/api/quota/OWNERS @@ -0,0 +1,3 @@ +reviewers: + - deads2k + - mfojtik diff --git a/vendor/github.com/openshift/api/quota/install.go b/vendor/github.com/openshift/api/quota/install.go new file mode 100644 index 000000000..2a88e7d0a --- /dev/null +++ b/vendor/github.com/openshift/api/quota/install.go @@ -0,0 +1,26 @@ +package quota + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + quotav1 "github.com/openshift/api/quota/v1" +) + +const ( + GroupName = "quota.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(quotav1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/quota/v1/0000_03_quota-openshift_01_clusterresourcequota.crd.yaml b/vendor/github.com/openshift/api/quota/v1/0000_03_quota-openshift_01_clusterresourcequota.crd.yaml new file mode 100644 index 000000000..35e932383 --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/0000_03_quota-openshift_01_clusterresourcequota.crd.yaml @@ -0,0 +1,197 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/470 + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: clusterresourcequotas.quota.openshift.io +spec: + group: quota.openshift.io + names: + kind: ClusterResourceQuota + listKind: ClusterResourceQuotaList + plural: clusterresourcequotas + singular: clusterresourcequota + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "ClusterResourceQuota mirrors ResourceQuota at a cluster scope. This object is easily convertible to synthetic ResourceQuota object to allow quota evaluation re-use. 
\n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the desired quota + type: object + required: + - quota + - selector + properties: + quota: + description: Quota defines the desired quota + type: object + properties: + hard: + description: 'hard is the set of desired hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/' + type: object + additionalProperties: + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + scopeSelector: + description: scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota but expressed using ScopeSelectorOperator in combination with possible values. For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched. + type: object + properties: + matchExpressions: + description: A list of scope selector requirements by scope of the resources. + type: array + items: + description: A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator that relates the scope name and values. + type: object + required: + - operator + - scopeName + properties: + operator: + description: Represents a scope's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. + type: string + scopeName: + description: The name of the scope that the selector applies to. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + scopes: + description: A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects. + type: array + items: + description: A ResourceQuotaScope defines a filter that must match each object tracked by a quota + type: string + selector: + description: Selector is the selector used to match projects. It should only select active projects on the scale of dozens (though it can select many more less active projects). These projects will contend on object creation through this resource. + type: object + properties: + annotations: + description: AnnotationSelector is used to select projects by annotation. 
+ type: object + additionalProperties: + type: string + nullable: true + labels: + description: LabelSelector is used to select projects by label. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + nullable: true + x-kubernetes-map-type: atomic + status: + description: Status defines the actual enforced quota and its current usage + type: object + required: + - total + properties: + namespaces: + description: Namespaces slices the usage by project. This division allows for quick resolution of deletion reconciliation inside of a single project without requiring a recalculation across all projects. This can be used to pull the deltas for a given project. + type: array + items: + description: ResourceQuotaStatusByNamespace gives status for a particular project + type: object + required: + - namespace + - status + properties: + namespace: + description: Namespace the project this status applies to + type: string + status: + description: Status indicates how many resources have been consumed by this project + type: object + properties: + hard: + description: 'Hard is the set of enforced hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/' + type: object + additionalProperties: + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + used: + description: Used is the current observed total usage of the resource in the namespace. + type: object + additionalProperties: + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + nullable: true + total: + description: Total defines the actual enforced quota and its current usage across all projects + type: object + properties: + hard: + description: 'Hard is the set of enforced hard limits for each named resource. 
More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/' + type: object + additionalProperties: + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + used: + description: Used is the current observed total usage of the resource in the namespace. + type: object + additionalProperties: + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/quota/v1/Makefile b/vendor/github.com/openshift/api/quota/v1/Makefile new file mode 100644 index 000000000..691859dd8 --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="quota.openshift.io/v1" diff --git a/vendor/github.com/openshift/api/quota/v1/doc.go b/vendor/github.com/openshift/api/quota/v1/doc.go new file mode 100644 index 000000000..ae5c9c2c7 --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/quota/apis/quota +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=quota.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/quota/v1/generated.pb.go b/vendor/github.com/openshift/api/quota/v1/generated.pb.go new file mode 100644 index 000000000..7556462cf --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/generated.pb.go @@ -0,0 +1,2152 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/quota/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
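The marshal pattern throughout these generated files rests on two helpers shown earlier in project/v1, sovGenerated and encodeVarintGenerated: Size computes exact varint-encoded lengths up front, and MarshalToSizedBuffer fills the buffer back to front. A standalone restatement of that arithmetic, runnable outside the vendored package (the sov/encodeVarint names are local stand-ins):

package main

import (
	"fmt"
	"math/bits"
)

// sov restates sovGenerated: the number of bytes x occupies as a
// protobuf varint, 7 payload bits per byte (x|1 keeps x=0 at one byte).
func sov(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// encodeVarint restates encodeVarintGenerated: it reserves sov(v)
// bytes ending just before offset, fills them, and returns the new
// start. MarshalToSizedBuffer uses this to write fields back to front.
func encodeVarint(buf []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return base
}

func main() {
	buf := make([]byte, 8)
	start := encodeVarint(buf, len(buf), 300)
	fmt.Printf("%x\n", buf[start:]) // ac02: the varint encoding of 300
}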
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *AppliedClusterResourceQuota) Reset() { *m = AppliedClusterResourceQuota{} } +func (*AppliedClusterResourceQuota) ProtoMessage() {} +func (*AppliedClusterResourceQuota) Descriptor() ([]byte, []int) { + return fileDescriptor_f605e5b8440aecb8, []int{0} +} +func (m *AppliedClusterResourceQuota) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AppliedClusterResourceQuota) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AppliedClusterResourceQuota) XXX_Merge(src proto.Message) { + xxx_messageInfo_AppliedClusterResourceQuota.Merge(m, src) +} +func (m *AppliedClusterResourceQuota) XXX_Size() int { + return m.Size() +} +func (m *AppliedClusterResourceQuota) XXX_DiscardUnknown() { + xxx_messageInfo_AppliedClusterResourceQuota.DiscardUnknown(m) +} + +var xxx_messageInfo_AppliedClusterResourceQuota proto.InternalMessageInfo + +func (m *AppliedClusterResourceQuotaList) Reset() { *m = AppliedClusterResourceQuotaList{} } +func (*AppliedClusterResourceQuotaList) ProtoMessage() {} +func (*AppliedClusterResourceQuotaList) Descriptor() ([]byte, []int) { + return fileDescriptor_f605e5b8440aecb8, []int{1} +} +func (m *AppliedClusterResourceQuotaList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AppliedClusterResourceQuotaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AppliedClusterResourceQuotaList) XXX_Merge(src proto.Message) { + xxx_messageInfo_AppliedClusterResourceQuotaList.Merge(m, src) +} +func (m *AppliedClusterResourceQuotaList) XXX_Size() int { + return m.Size() +} +func (m *AppliedClusterResourceQuotaList) XXX_DiscardUnknown() { + xxx_messageInfo_AppliedClusterResourceQuotaList.DiscardUnknown(m) +} + +var xxx_messageInfo_AppliedClusterResourceQuotaList proto.InternalMessageInfo + +func (m *ClusterResourceQuota) Reset() { *m = ClusterResourceQuota{} } +func (*ClusterResourceQuota) ProtoMessage() {} +func (*ClusterResourceQuota) Descriptor() ([]byte, []int) { + return fileDescriptor_f605e5b8440aecb8, []int{2} +} +func (m *ClusterResourceQuota) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterResourceQuota) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterResourceQuota) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterResourceQuota.Merge(m, src) +} +func (m *ClusterResourceQuota) XXX_Size() int { + return m.Size() +} +func (m *ClusterResourceQuota) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterResourceQuota.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterResourceQuota proto.InternalMessageInfo + +func (m *ClusterResourceQuotaList) Reset() { *m = ClusterResourceQuotaList{} } +func (*ClusterResourceQuotaList) ProtoMessage() {} +func (*ClusterResourceQuotaList) Descriptor() ([]byte, []int) { + return fileDescriptor_f605e5b8440aecb8, []int{3} +} +func (m *ClusterResourceQuotaList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterResourceQuotaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + 
return nil, err + } + return b[:n], nil +} +func (m *ClusterResourceQuotaList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterResourceQuotaList.Merge(m, src) +} +func (m *ClusterResourceQuotaList) XXX_Size() int { + return m.Size() +} +func (m *ClusterResourceQuotaList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterResourceQuotaList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterResourceQuotaList proto.InternalMessageInfo + +func (m *ClusterResourceQuotaSelector) Reset() { *m = ClusterResourceQuotaSelector{} } +func (*ClusterResourceQuotaSelector) ProtoMessage() {} +func (*ClusterResourceQuotaSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_f605e5b8440aecb8, []int{4} +} +func (m *ClusterResourceQuotaSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterResourceQuotaSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterResourceQuotaSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterResourceQuotaSelector.Merge(m, src) +} +func (m *ClusterResourceQuotaSelector) XXX_Size() int { + return m.Size() +} +func (m *ClusterResourceQuotaSelector) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterResourceQuotaSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterResourceQuotaSelector proto.InternalMessageInfo + +func (m *ClusterResourceQuotaSpec) Reset() { *m = ClusterResourceQuotaSpec{} } +func (*ClusterResourceQuotaSpec) ProtoMessage() {} +func (*ClusterResourceQuotaSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_f605e5b8440aecb8, []int{5} +} +func (m *ClusterResourceQuotaSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterResourceQuotaSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterResourceQuotaSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterResourceQuotaSpec.Merge(m, src) +} +func (m *ClusterResourceQuotaSpec) XXX_Size() int { + return m.Size() +} +func (m *ClusterResourceQuotaSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterResourceQuotaSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterResourceQuotaSpec proto.InternalMessageInfo + +func (m *ClusterResourceQuotaStatus) Reset() { *m = ClusterResourceQuotaStatus{} } +func (*ClusterResourceQuotaStatus) ProtoMessage() {} +func (*ClusterResourceQuotaStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_f605e5b8440aecb8, []int{6} +} +func (m *ClusterResourceQuotaStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterResourceQuotaStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterResourceQuotaStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterResourceQuotaStatus.Merge(m, src) +} +func (m *ClusterResourceQuotaStatus) XXX_Size() int { + return m.Size() +} +func (m *ClusterResourceQuotaStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterResourceQuotaStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterResourceQuotaStatus proto.InternalMessageInfo + +func (m *ResourceQuotaStatusByNamespace) Reset() { *m = ResourceQuotaStatusByNamespace{} } +func (*ResourceQuotaStatusByNamespace) ProtoMessage() {} +func 
(*ResourceQuotaStatusByNamespace) Descriptor() ([]byte, []int) { + return fileDescriptor_f605e5b8440aecb8, []int{7} +} +func (m *ResourceQuotaStatusByNamespace) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceQuotaStatusByNamespace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceQuotaStatusByNamespace) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceQuotaStatusByNamespace.Merge(m, src) +} +func (m *ResourceQuotaStatusByNamespace) XXX_Size() int { + return m.Size() +} +func (m *ResourceQuotaStatusByNamespace) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceQuotaStatusByNamespace.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceQuotaStatusByNamespace proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AppliedClusterResourceQuota)(nil), "github.com.openshift.api.quota.v1.AppliedClusterResourceQuota") + proto.RegisterType((*AppliedClusterResourceQuotaList)(nil), "github.com.openshift.api.quota.v1.AppliedClusterResourceQuotaList") + proto.RegisterType((*ClusterResourceQuota)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuota") + proto.RegisterType((*ClusterResourceQuotaList)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuotaList") + proto.RegisterType((*ClusterResourceQuotaSelector)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuotaSelector") + proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuotaSelector.AnnotationsEntry") + proto.RegisterType((*ClusterResourceQuotaSpec)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuotaSpec") + proto.RegisterType((*ClusterResourceQuotaStatus)(nil), "github.com.openshift.api.quota.v1.ClusterResourceQuotaStatus") + proto.RegisterType((*ResourceQuotaStatusByNamespace)(nil), "github.com.openshift.api.quota.v1.ResourceQuotaStatusByNamespace") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/quota/v1/generated.proto", fileDescriptor_f605e5b8440aecb8) +} + +var fileDescriptor_f605e5b8440aecb8 = []byte{ + // 716 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x41, 0x6f, 0xd3, 0x3e, + 0x1c, 0x6d, 0xba, 0x75, 0x5a, 0xbd, 0xff, 0xfe, 0xda, 0xac, 0x1d, 0xaa, 0x82, 0xd2, 0x2d, 0x12, + 0x62, 0x17, 0x1c, 0x3a, 0x10, 0x4c, 0x20, 0x86, 0x16, 0x84, 0x10, 0x68, 0x30, 0x08, 0x9c, 0xd0, + 0x40, 0xb8, 0x99, 0xd7, 0x86, 0x26, 0x71, 0x88, 0x9d, 0x4a, 0xbd, 0xf1, 0x09, 0x10, 0x9f, 0x81, + 0x0f, 0xc2, 0x0d, 0x69, 0x37, 0x76, 0x01, 0xed, 0x34, 0xd1, 0xc0, 0x07, 0x41, 0x76, 0xdc, 0xa4, + 0xdb, 0xda, 0xad, 0x6c, 0x07, 0x2e, 0xdc, 0xe2, 0x5f, 0xfd, 0xde, 0xfb, 0xfd, 0x5e, 0x9e, 0xdd, + 0x80, 0x7a, 0xd3, 0xe5, 0xad, 0xb8, 0x81, 0x1c, 0xea, 0x9b, 0x34, 0x24, 0x01, 0x6b, 0xb9, 0x3b, + 0xdc, 0xc4, 0xa1, 0x6b, 0xbe, 0x8b, 0x29, 0xc7, 0x66, 0xa7, 0x6e, 0x36, 0x49, 0x40, 0x22, 0xcc, + 0xc9, 0x36, 0x0a, 0x23, 0xca, 0x29, 0x5c, 0xca, 0x21, 0x28, 0x83, 0x20, 0x1c, 0xba, 0x48, 0x42, + 0x50, 0xa7, 0x5e, 0xbd, 0x32, 0xc0, 0xda, 0xa4, 0x4d, 0x6a, 0x4a, 0x64, 0x23, 0xde, 0x91, 0x2b, + 0xb9, 0x90, 0x4f, 0x29, 0x63, 0xd5, 0x68, 0xaf, 0x32, 0xe4, 0x52, 0x29, 0xeb, 0xd0, 0x88, 0x0c, + 0x51, 0xad, 0x5e, 0xcf, 0xf7, 0xf8, 0xd8, 0x69, 0xb9, 0x01, 0x89, 0xba, 0x66, 0xd8, 0x6e, 0x8a, + 0x02, 0x33, 0x7d, 0x32, 0xb4, 0xd7, 0xea, 0x8d, 0x51, 0xa8, 0x28, 0x0e, 0xb8, 0xeb, 0x13, 0x93, + 0x39, 0x2d, 0xe2, 0xe3, 0xa3, 0x38, 0xe3, 0x4b, 0x11, 
0x5c, 0x58, 0x0f, 0x43, 0xcf, 0x25, 0xdb, + 0xf7, 0xbc, 0x98, 0x71, 0x12, 0xd9, 0x84, 0xd1, 0x38, 0x72, 0xc8, 0x33, 0x31, 0x23, 0x7c, 0x03, + 0xa6, 0x85, 0xe4, 0x36, 0xe6, 0xb8, 0xa2, 0x2d, 0x6a, 0xcb, 0x33, 0x2b, 0x57, 0x51, 0x2a, 0x85, + 0x06, 0xa5, 0x50, 0xd8, 0x6e, 0x8a, 0x02, 0x43, 0x62, 0x37, 0xea, 0xd4, 0xd1, 0x66, 0xe3, 0x2d, + 0x71, 0xf8, 0x63, 0xc2, 0xb1, 0x05, 0x77, 0x0f, 0x6a, 0x85, 0xe4, 0xa0, 0x06, 0xf2, 0x9a, 0x9d, + 0xb1, 0xc2, 0x57, 0x60, 0x92, 0x85, 0xc4, 0xa9, 0x14, 0x25, 0xfb, 0x6d, 0x74, 0xaa, 0xe9, 0x68, + 0x58, 0xa3, 0xcf, 0x43, 0xe2, 0x58, 0xff, 0x29, 0xa1, 0x49, 0xb1, 0xb2, 0x25, 0x2d, 0x24, 0x60, + 0x8a, 0x71, 0xcc, 0x63, 0x56, 0x99, 0x90, 0x02, 0x77, 0xce, 0x2a, 0x20, 0x49, 0xac, 0xff, 0x95, + 0xc4, 0x54, 0xba, 0xb6, 0x15, 0xb9, 0xf1, 0x4b, 0x03, 0xb5, 0x13, 0x7c, 0xdc, 0x70, 0x19, 0x87, + 0x5b, 0xc7, 0xbc, 0x44, 0xe3, 0x79, 0x29, 0xd0, 0xd2, 0xc9, 0x39, 0xa5, 0x3e, 0xdd, 0xaf, 0x0c, + 0xf8, 0xe8, 0x80, 0x92, 0xcb, 0x89, 0xcf, 0x2a, 0xc5, 0xc5, 0x89, 0xe5, 0x99, 0x95, 0xb5, 0x31, + 0xe6, 0x3c, 0xa1, 0x61, 0x6b, 0x56, 0x49, 0x95, 0x1e, 0x0a, 0x52, 0x3b, 0xe5, 0x36, 0x3e, 0x17, + 0xc1, 0xc2, 0xbf, 0x9c, 0x9c, 0x23, 0x27, 0xdf, 0x35, 0x50, 0xf9, 0x4b, 0x01, 0xd9, 0x3a, 0x1c, + 0x90, 0x9b, 0x67, 0x1c, 0x70, 0x44, 0x32, 0xbe, 0x16, 0xc1, 0xc5, 0xa1, 0x7e, 0x10, 0x8f, 0x38, + 0x9c, 0x46, 0xf0, 0x35, 0x98, 0xf2, 0x70, 0x83, 0x78, 0x4c, 0x8d, 0x76, 0x6d, 0xcc, 0xd1, 0x04, + 0xa6, 0x4f, 0x62, 0xcd, 0x27, 0x07, 0xb5, 0xd9, 0x43, 0x25, 0x5b, 0xb1, 0xc2, 0x0f, 0x1a, 0x98, + 0xc1, 0x41, 0x40, 0x39, 0xe6, 0x2e, 0x0d, 0xfa, 0x53, 0x3e, 0x3d, 0xeb, 0x6b, 0x54, 0xf4, 0x68, + 0x3d, 0xa7, 0xbc, 0x1f, 0xf0, 0xa8, 0x6b, 0x55, 0xd5, 0xf8, 0x30, 0xff, 0x25, 0xeb, 0x65, 0xb0, + 0x81, 0xea, 0x1a, 0x98, 0x3b, 0x0a, 0x86, 0x73, 0x60, 0xa2, 0x4d, 0xba, 0xd2, 0x81, 0xb2, 0x2d, + 0x1e, 0xe1, 0x02, 0x28, 0x75, 0xb0, 0x17, 0x13, 0x99, 0xeb, 0xb2, 0x9d, 0x2e, 0x6e, 0x15, 0x57, + 0x35, 0xe3, 0xdb, 0x88, 0xa8, 0x88, 0xd0, 0x42, 0x1f, 0x4c, 0x33, 0xa5, 0xaa, 0xfc, 0xbc, 0x7b, + 0xce, 0x49, 0xf3, 0xec, 0x64, 0xe3, 0x64, 0x12, 0xf0, 0x11, 0x28, 0x49, 0x12, 0x75, 0xfa, 0x2e, + 0x0d, 0xbc, 0x3b, 0x24, 0xfe, 0xc8, 0x04, 0xf9, 0xf1, 0x73, 0x96, 0x25, 0x45, 0x96, 0xec, 0x94, + 0xc2, 0xe8, 0x69, 0xa0, 0x3a, 0xfa, 0xe4, 0xc0, 0x0d, 0x50, 0xe2, 0x94, 0x63, 0x4f, 0x8d, 0x75, + 0xf9, 0x74, 0xa9, 0xf4, 0xc4, 0x65, 0x62, 0x2f, 0x04, 0xda, 0x4e, 0x49, 0x60, 0x0c, 0x40, 0x80, + 0x7d, 0xc2, 0x42, 0xec, 0x90, 0x7e, 0x26, 0xd6, 0xc7, 0x70, 0x6a, 0x98, 0x42, 0xf7, 0x49, 0x9f, + 0x29, 0xbf, 0xaa, 0xb2, 0x12, 0xb3, 0x07, 0x84, 0x8c, 0x4f, 0x1a, 0xd0, 0x4f, 0xa6, 0x80, 0x26, + 0x28, 0x67, 0x80, 0x34, 0x10, 0xd6, 0xbc, 0x62, 0x2d, 0x67, 0xbb, 0xec, 0x7c, 0x0f, 0xdc, 0xcc, + 0x6e, 0xa8, 0xe2, 0x9f, 0x39, 0x33, 0xe2, 0x2e, 0xb2, 0x1e, 0xec, 0xf6, 0xf4, 0xc2, 0x5e, 0x4f, + 0x2f, 0xec, 0xf7, 0xf4, 0xc2, 0xfb, 0x44, 0xd7, 0x76, 0x13, 0x5d, 0xdb, 0x4b, 0x74, 0x6d, 0x3f, + 0xd1, 0xb5, 0x1f, 0x89, 0xae, 0x7d, 0xfc, 0xa9, 0x17, 0x5e, 0x2e, 0x9d, 0xfa, 0xe1, 0xf4, 0x3b, + 0x00, 0x00, 0xff, 0xff, 0xda, 0x49, 0x50, 0x7b, 0x5c, 0x09, 0x00, 0x00, +} + +func (m *AppliedClusterResourceQuota) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AppliedClusterResourceQuota) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AppliedClusterResourceQuota) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ 
= i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AppliedClusterResourceQuotaList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AppliedClusterResourceQuotaList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AppliedClusterResourceQuotaList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterResourceQuota) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterResourceQuota) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterResourceQuota) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterResourceQuotaList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterResourceQuotaList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterResourceQuotaList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := 
m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterResourceQuotaSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterResourceQuotaSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterResourceQuotaSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AnnotationSelector) > 0 { + keysForAnnotationSelector := make([]string, 0, len(m.AnnotationSelector)) + for k := range m.AnnotationSelector { + keysForAnnotationSelector = append(keysForAnnotationSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotationSelector) + for iNdEx := len(keysForAnnotationSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.AnnotationSelector[string(keysForAnnotationSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAnnotationSelector[iNdEx]) + copy(dAtA[i:], keysForAnnotationSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotationSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if m.LabelSelector != nil { + { + size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ClusterResourceQuotaSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterResourceQuotaSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterResourceQuotaSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Quota.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterResourceQuotaStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterResourceQuotaStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Namespaces) > 0 { + for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Namespaces[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 
0x12 + } + } + { + size, err := m.Total.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceQuotaStatusByNamespace) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceQuotaStatusByNamespace) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceQuotaStatusByNamespace) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AppliedClusterResourceQuota) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AppliedClusterResourceQuotaList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterResourceQuota) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterResourceQuotaList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterResourceQuotaSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AnnotationSelector) > 0 { + for k, v := range m.AnnotationSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ClusterResourceQuotaSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Quota.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterResourceQuotaStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Total.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Namespaces) > 0 { + for _, e := range m.Namespaces { + l = e.Size() + 
n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceQuotaStatusByNamespace) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AppliedClusterResourceQuota) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AppliedClusterResourceQuota{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterResourceQuotaSpec", "ClusterResourceQuotaSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ClusterResourceQuotaStatus", "ClusterResourceQuotaStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *AppliedClusterResourceQuotaList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]AppliedClusterResourceQuota{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "AppliedClusterResourceQuota", "AppliedClusterResourceQuota", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&AppliedClusterResourceQuotaList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ClusterResourceQuota) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterResourceQuota{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterResourceQuotaSpec", "ClusterResourceQuotaSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ClusterResourceQuotaStatus", "ClusterResourceQuotaStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterResourceQuotaList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ClusterResourceQuota{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterResourceQuota", "ClusterResourceQuota", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterResourceQuotaList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ClusterResourceQuotaSelector) String() string { + if this == nil { + return "nil" + } + keysForAnnotationSelector := make([]string, 0, len(this.AnnotationSelector)) + for k := range this.AnnotationSelector { + keysForAnnotationSelector = append(keysForAnnotationSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotationSelector) + mapStringForAnnotationSelector := "map[string]string{" + for _, k := range keysForAnnotationSelector { + mapStringForAnnotationSelector += fmt.Sprintf("%v: %v,", k, 
this.AnnotationSelector[k]) + } + mapStringForAnnotationSelector += "}" + s := strings.Join([]string{`&ClusterResourceQuotaSelector{`, + `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `AnnotationSelector:` + mapStringForAnnotationSelector + `,`, + `}`, + }, "") + return s +} +func (this *ClusterResourceQuotaSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterResourceQuotaSpec{`, + `Selector:` + strings.Replace(strings.Replace(this.Selector.String(), "ClusterResourceQuotaSelector", "ClusterResourceQuotaSelector", 1), `&`, ``, 1) + `,`, + `Quota:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Quota), "ResourceQuotaSpec", "v11.ResourceQuotaSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterResourceQuotaStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForNamespaces := "[]ResourceQuotaStatusByNamespace{" + for _, f := range this.Namespaces { + repeatedStringForNamespaces += strings.Replace(strings.Replace(f.String(), "ResourceQuotaStatusByNamespace", "ResourceQuotaStatusByNamespace", 1), `&`, ``, 1) + "," + } + repeatedStringForNamespaces += "}" + s := strings.Join([]string{`&ClusterResourceQuotaStatus{`, + `Total:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Total), "ResourceQuotaStatus", "v11.ResourceQuotaStatus", 1), `&`, ``, 1) + `,`, + `Namespaces:` + repeatedStringForNamespaces + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaStatusByNamespace) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceQuotaStatusByNamespace{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Status:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Status), "ResourceQuotaStatus", "v11.ResourceQuotaStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AppliedClusterResourceQuota) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AppliedClusterResourceQuota: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AppliedClusterResourceQuota: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + 
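+			// Every length-delimited field in this switch is decoded the same
+			// way: the varint tag consumed above packs (field_number << 3) |
+			// wire_type, the next varint gives the payload length, and the
+			// payload bytes are handed to the embedded message's own Unmarshal.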
case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AppliedClusterResourceQuotaList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AppliedClusterResourceQuotaList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AppliedClusterResourceQuotaList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, AppliedClusterResourceQuota{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterResourceQuota) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterResourceQuota: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterResourceQuota: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterResourceQuotaList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterResourceQuotaList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterResourceQuotaList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterResourceQuota{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterResourceQuotaSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterResourceQuotaSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterResourceQuotaSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var msglen int + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelSelector == nil { + m.LabelSelector = &v1.LabelSelector{} + } + if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AnnotationSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AnnotationSelector == nil { + m.AnnotationSelector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AnnotationSelector[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) 
|| (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterResourceQuotaSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterResourceQuotaSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterResourceQuotaSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Quota", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Quota.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterResourceQuotaStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterResourceQuotaStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterResourceQuotaStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Total.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, ResourceQuotaStatusByNamespace{}) + if err := m.Namespaces[len(m.Namespaces)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceQuotaStatusByNamespace) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceQuotaStatusByNamespace: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceQuotaStatusByNamespace: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipGenerated(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipGenerated(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowGenerated
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthGenerated
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupGenerated
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
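Note that the generated marshal path above writes messages back to front: MarshalToSizedBuffer starts at the end of a pre-sized buffer, and each field is emitted payload first, then its varint length, then its tag, which is why the tag bytes (0x1a, 0x12, 0xa) appear in descending field order in the code. A round trip through these helpers can be sketched as follows; the package alias and object name are illustrative, not part of the patch:

    package main

    import (
    	"fmt"

    	proto "github.com/gogo/protobuf/proto"
    	quotav1 "github.com/openshift/api/quota/v1"
    )

    func main() {
    	in := &quotav1.ClusterResourceQuota{}
    	in.Name = "example" // hypothetical object name

    	// proto.Marshal detects the generated Marshal method and uses it.
    	raw, err := proto.Marshal(in)
    	if err != nil {
    		panic(err)
    	}

    	out := &quotav1.ClusterResourceQuota{}
    	if err := proto.Unmarshal(raw, out); err != nil {
    		panic(err)
    	}
    	fmt.Println(out.Name) // "example"
    }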
diff --git a/vendor/github.com/openshift/api/quota/v1/generated.proto b/vendor/github.com/openshift/api/quota/v1/generated.proto
new file mode 100644
index 000000000..70983e82d
--- /dev/null
+++ b/vendor/github.com/openshift/api/quota/v1/generated.proto
@@ -0,0 +1,124 @@
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package github.com.openshift.api.quota.v1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "github.com/openshift/api/quota/v1";
+
+// AppliedClusterResourceQuota mirrors ClusterResourceQuota at a project scope, for projection
+// into a project. It allows a project admin to know which ClusterResourceQuotas are applied to
+// their project and their associated usage.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message AppliedClusterResourceQuota {
+  // metadata is the standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec defines the desired quota
+  optional ClusterResourceQuotaSpec spec = 2;
+
+  // Status defines the actual enforced quota and its current usage
+  optional ClusterResourceQuotaStatus status = 3;
+}
+
+// AppliedClusterResourceQuotaList is a collection of AppliedClusterResourceQuotas
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message AppliedClusterResourceQuotaList {
+  // metadata is the standard list's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of AppliedClusterResourceQuota
+  repeated AppliedClusterResourceQuota items = 2;
+}
+
+// ClusterResourceQuota mirrors ResourceQuota at a cluster scope. This object is easily convertible to
+// a synthetic ResourceQuota object to allow quota evaluation re-use.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ClusterResourceQuota {
+  // metadata is the standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec defines the desired quota
+  optional ClusterResourceQuotaSpec spec = 2;
+
+  // Status defines the actual enforced quota and its current usage
+  optional ClusterResourceQuotaStatus status = 3;
+}
+
+// ClusterResourceQuotaList is a collection of ClusterResourceQuotas
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message ClusterResourceQuotaList {
+  // metadata is the standard list's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of ClusterResourceQuotas
+  repeated ClusterResourceQuota items = 2;
+}
+
+// ClusterResourceQuotaSelector is used to select projects. At least one of LabelSelector or AnnotationSelector
+// must be present. If only one is present, it is the only selection criterion. If both are specified,
+// the project must match both restrictions.
+message ClusterResourceQuotaSelector {
+  // LabelSelector is used to select projects by label.
+  // +optional
+  // +nullable
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labels = 1;
+
+  // AnnotationSelector is used to select projects by annotation.
+  // +optional
+  // +nullable
+  map<string, string> annotations = 2;
+}
+
+// ClusterResourceQuotaSpec defines the desired quota restrictions
+message ClusterResourceQuotaSpec {
+  // Selector is the selector used to match projects.
+  // It should only select active projects on the scale of dozens (though it can select
+  // many more less active projects). These projects will contend on object creation through
+  // this resource.
+  optional ClusterResourceQuotaSelector selector = 1;
+
+  // Quota defines the desired quota
+  optional k8s.io.api.core.v1.ResourceQuotaSpec quota = 2;
+}
+
+// ClusterResourceQuotaStatus defines the actual enforced quota and its current usage
+message ClusterResourceQuotaStatus {
+  // Total defines the actual enforced quota and its current usage across all projects
+  optional k8s.io.api.core.v1.ResourceQuotaStatus total = 1;
+
+  // Namespaces slices the usage by project. This division allows for quick resolution of
+  // deletion reconciliation inside of a single project without requiring a recalculation
+  // across all projects. This can be used to pull the deltas for a given project.
+  // +optional
+  // +nullable
+  repeated ResourceQuotaStatusByNamespace namespaces = 2;
+}
+
+// ResourceQuotaStatusByNamespace gives status for a particular project
+message ResourceQuotaStatusByNamespace {
+  // Namespace the project this status applies to
+  optional string namespace = 1;
+
+  // Status indicates how many resources have been consumed by this project
+  optional k8s.io.api.core.v1.ResourceQuotaStatus status = 2;
+}
+
+ optional ClusterResourceQuotaSelector selector = 1; + + // Quota defines the desired quota + optional k8s.io.api.core.v1.ResourceQuotaSpec quota = 2; +} + +// ClusterResourceQuotaStatus defines the actual enforced quota and its current usage +message ClusterResourceQuotaStatus { + // Total defines the actual enforced quota and its current usage across all projects + optional k8s.io.api.core.v1.ResourceQuotaStatus total = 1; + + // Namespaces slices the usage by project. This division allows for quick resolution of + // deletion reconciliation inside of a single project without requiring a recalculation + // across all projects. This can be used to pull the deltas for a given project. + // +optional + // +nullable + repeated ResourceQuotaStatusByNamespace namespaces = 2; +} + +// ResourceQuotaStatusByNamespace gives status for a particular project +message ResourceQuotaStatusByNamespace { + // Namespace the project this status applies to + optional string namespace = 1; + + // Status indicates how many resources have been consumed by this project + optional k8s.io.api.core.v1.ResourceQuotaStatus status = 2; +} + diff --git a/vendor/github.com/openshift/api/quota/v1/legacy.go b/vendor/github.com/openshift/api/quota/v1/legacy.go new file mode 100644 index 000000000..402690b5d --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/legacy.go @@ -0,0 +1,24 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &ClusterResourceQuota{}, + &ClusterResourceQuotaList{}, + &AppliedClusterResourceQuota{}, + &AppliedClusterResourceQuotaList{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) + return nil +} diff --git a/vendor/github.com/openshift/api/quota/v1/register.go b/vendor/github.com/openshift/api/quota/v1/register.go new file mode 100644 index 000000000..47c774ef2 --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/register.go @@ -0,0 +1,41 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "quota.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &ClusterResourceQuota{}, + &ClusterResourceQuotaList{}, + &AppliedClusterResourceQuota{}, + &AppliedClusterResourceQuotaList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/quota/v1/stable.clusterresourcequota.testsuite.yaml b/vendor/github.com/openshift/api/quota/v1/stable.clusterresourcequota.testsuite.yaml new file mode 100644 index 000000000..cfcba0aed --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/stable.clusterresourcequota.testsuite.yaml @@ -0,0 +1,18 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] ClusterResourceQuota" +crd: 0000_03_quota-openshift_01_clusterresourcequota.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal ClusterResourceQuota + initial: | + apiVersion: quota.openshift.io/v1 + kind: ClusterResourceQuota + spec: + selector: {} + quota: {} + expected: | + apiVersion: quota.openshift.io/v1 + kind: ClusterResourceQuota + spec: + selector: {} + quota: {} diff --git a/vendor/github.com/openshift/api/quota/v1/types.go b/vendor/github.com/openshift/api/quota/v1/types.go new file mode 100644 index 000000000..9742331f7 --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/types.go @@ -0,0 +1,139 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterResourceQuota mirrors ResourceQuota at a cluster scope. This object is easily convertible to +// synthetic ResourceQuota object to allow quota evaluation re-use. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ClusterResourceQuota struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the desired quota + Spec ClusterResourceQuotaSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status defines the actual enforced quota and its current usage + Status ClusterResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ClusterResourceQuotaSpec defines the desired quota restrictions +type ClusterResourceQuotaSpec struct { + // Selector is the selector used to match projects. + // It should only select active projects on the scale of dozens (though it can select + // many more less active projects). These projects will contend on object creation through + // this resource. + Selector ClusterResourceQuotaSelector `json:"selector" protobuf:"bytes,1,opt,name=selector"` + + // Quota defines the desired quota + Quota corev1.ResourceQuotaSpec `json:"quota" protobuf:"bytes,2,opt,name=quota"` +} + +// ClusterResourceQuotaSelector is used to select projects. At least one of LabelSelector or AnnotationSelector +// must present. If only one is present, it is the only selection criteria. If both are specified, +// the project must match both restrictions. +type ClusterResourceQuotaSelector struct { + // LabelSelector is used to select projects by label. 
+ // +optional + // +nullable + LabelSelector *metav1.LabelSelector `json:"labels" protobuf:"bytes,1,opt,name=labels"` + + // AnnotationSelector is used to select projects by annotation. + // +optional + // +nullable + AnnotationSelector map[string]string `json:"annotations" protobuf:"bytes,2,rep,name=annotations"` +} + +// ClusterResourceQuotaStatus defines the actual enforced quota and its current usage +type ClusterResourceQuotaStatus struct { + // Total defines the actual enforced quota and its current usage across all projects + Total corev1.ResourceQuotaStatus `json:"total" protobuf:"bytes,1,opt,name=total"` + + // Namespaces slices the usage by project. This division allows for quick resolution of + // deletion reconciliation inside of a single project without requiring a recalculation + // across all projects. This can be used to pull the deltas for a given project. + // +optional + // +nullable + Namespaces ResourceQuotasStatusByNamespace `json:"namespaces" protobuf:"bytes,2,rep,name=namespaces"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterResourceQuotaList is a collection of ClusterResourceQuotas +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ClusterResourceQuotaList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ClusterResourceQuotas + Items []ClusterResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ResourceQuotasStatusByNamespace bundles multiple ResourceQuotaStatusByNamespace +type ResourceQuotasStatusByNamespace []ResourceQuotaStatusByNamespace + +// ResourceQuotaStatusByNamespace gives status for a particular project +type ResourceQuotaStatusByNamespace struct { + // Namespace the project this status applies to + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + + // Status indicates how many resources have been consumed by this project + Status corev1.ResourceQuotaStatus `json:"status" protobuf:"bytes,2,opt,name=status"` +} + +// +genclient +// +genclient:onlyVerbs=get,list +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AppliedClusterResourceQuota mirrors ClusterResourceQuota at a project scope, for projection +// into a project. It allows a project-admin to know which ClusterResourceQuotas are applied to +// his project and their associated usage. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type AppliedClusterResourceQuota struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the desired quota + Spec ClusterResourceQuotaSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status defines the actual enforced quota and its current usage + Status ClusterResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AppliedClusterResourceQuotaList is a collection of AppliedClusterResourceQuotas +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type AppliedClusterResourceQuotaList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of AppliedClusterResourceQuota + Items []AppliedClusterResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/github.com/openshift/api/quota/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/quota/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..72ac882fb --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/zz_generated.deepcopy.go @@ -0,0 +1,242 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppliedClusterResourceQuota) DeepCopyInto(out *AppliedClusterResourceQuota) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppliedClusterResourceQuota. +func (in *AppliedClusterResourceQuota) DeepCopy() *AppliedClusterResourceQuota { + if in == nil { + return nil + } + out := new(AppliedClusterResourceQuota) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AppliedClusterResourceQuota) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AppliedClusterResourceQuotaList) DeepCopyInto(out *AppliedClusterResourceQuotaList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AppliedClusterResourceQuota, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AppliedClusterResourceQuotaList. 
+func (in *AppliedClusterResourceQuotaList) DeepCopy() *AppliedClusterResourceQuotaList { + if in == nil { + return nil + } + out := new(AppliedClusterResourceQuotaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AppliedClusterResourceQuotaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterResourceQuota) DeepCopyInto(out *ClusterResourceQuota) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceQuota. +func (in *ClusterResourceQuota) DeepCopy() *ClusterResourceQuota { + if in == nil { + return nil + } + out := new(ClusterResourceQuota) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterResourceQuota) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterResourceQuotaList) DeepCopyInto(out *ClusterResourceQuotaList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterResourceQuota, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceQuotaList. +func (in *ClusterResourceQuotaList) DeepCopy() *ClusterResourceQuotaList { + if in == nil { + return nil + } + out := new(ClusterResourceQuotaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterResourceQuotaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterResourceQuotaSelector) DeepCopyInto(out *ClusterResourceQuotaSelector) { + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.AnnotationSelector != nil { + in, out := &in.AnnotationSelector, &out.AnnotationSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceQuotaSelector. +func (in *ClusterResourceQuotaSelector) DeepCopy() *ClusterResourceQuotaSelector { + if in == nil { + return nil + } + out := new(ClusterResourceQuotaSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterResourceQuotaSpec) DeepCopyInto(out *ClusterResourceQuotaSpec) { + *out = *in + in.Selector.DeepCopyInto(&out.Selector) + in.Quota.DeepCopyInto(&out.Quota) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceQuotaSpec. +func (in *ClusterResourceQuotaSpec) DeepCopy() *ClusterResourceQuotaSpec { + if in == nil { + return nil + } + out := new(ClusterResourceQuotaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterResourceQuotaStatus) DeepCopyInto(out *ClusterResourceQuotaStatus) { + *out = *in + in.Total.DeepCopyInto(&out.Total) + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make(ResourceQuotasStatusByNamespace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterResourceQuotaStatus. +func (in *ClusterResourceQuotaStatus) DeepCopy() *ClusterResourceQuotaStatus { + if in == nil { + return nil + } + out := new(ClusterResourceQuotaStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceQuotaStatusByNamespace) DeepCopyInto(out *ResourceQuotaStatusByNamespace) { + *out = *in + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaStatusByNamespace. +func (in *ResourceQuotaStatusByNamespace) DeepCopy() *ResourceQuotaStatusByNamespace { + if in == nil { + return nil + } + out := new(ResourceQuotaStatusByNamespace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ResourceQuotasStatusByNamespace) DeepCopyInto(out *ResourceQuotasStatusByNamespace) { + { + in := &in + *out = make(ResourceQuotasStatusByNamespace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotasStatusByNamespace. +func (in ResourceQuotasStatusByNamespace) DeepCopy() ResourceQuotasStatusByNamespace { + if in == nil { + return nil + } + out := new(ResourceQuotasStatusByNamespace) + in.DeepCopyInto(out) + return *out +} diff --git a/vendor/github.com/openshift/api/quota/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/quota/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..3072671c5 --- /dev/null +++ b/vendor/github.com/openshift/api/quota/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,96 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. 
+// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_AppliedClusterResourceQuota = map[string]string{ + "": "AppliedClusterResourceQuota mirrors ClusterResourceQuota at a project scope, for projection into a project. It allows a project-admin to know which ClusterResourceQuotas are applied to his project and their associated usage.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the desired quota", + "status": "Status defines the actual enforced quota and its current usage", +} + +func (AppliedClusterResourceQuota) SwaggerDoc() map[string]string { + return map_AppliedClusterResourceQuota +} + +var map_AppliedClusterResourceQuotaList = map[string]string{ + "": "AppliedClusterResourceQuotaList is a collection of AppliedClusterResourceQuotas\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is a list of AppliedClusterResourceQuota", +} + +func (AppliedClusterResourceQuotaList) SwaggerDoc() map[string]string { + return map_AppliedClusterResourceQuotaList +} + +var map_ClusterResourceQuota = map[string]string{ + "": "ClusterResourceQuota mirrors ResourceQuota at a cluster scope. This object is easily convertible to synthetic ResourceQuota object to allow quota evaluation re-use.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the desired quota", + "status": "Status defines the actual enforced quota and its current usage", +} + +func (ClusterResourceQuota) SwaggerDoc() map[string]string { + return map_ClusterResourceQuota +} + +var map_ClusterResourceQuotaList = map[string]string{ + "": "ClusterResourceQuotaList is a collection of ClusterResourceQuotas\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is a list of ClusterResourceQuotas", +} + +func (ClusterResourceQuotaList) SwaggerDoc() map[string]string { + return map_ClusterResourceQuotaList +} + +var map_ClusterResourceQuotaSelector = map[string]string{ + "": "ClusterResourceQuotaSelector is used to select projects. At least one of LabelSelector or AnnotationSelector must present. If only one is present, it is the only selection criteria. 
If both are specified, the project must match both restrictions.", + "labels": "LabelSelector is used to select projects by label.", + "annotations": "AnnotationSelector is used to select projects by annotation.", +} + +func (ClusterResourceQuotaSelector) SwaggerDoc() map[string]string { + return map_ClusterResourceQuotaSelector +} + +var map_ClusterResourceQuotaSpec = map[string]string{ + "": "ClusterResourceQuotaSpec defines the desired quota restrictions", + "selector": "Selector is the selector used to match projects. It should only select active projects on the scale of dozens (though it can select many more less active projects). These projects will contend on object creation through this resource.", + "quota": "Quota defines the desired quota", +} + +func (ClusterResourceQuotaSpec) SwaggerDoc() map[string]string { + return map_ClusterResourceQuotaSpec +} + +var map_ClusterResourceQuotaStatus = map[string]string{ + "": "ClusterResourceQuotaStatus defines the actual enforced quota and its current usage", + "total": "Total defines the actual enforced quota and its current usage across all projects", + "namespaces": "Namespaces slices the usage by project. This division allows for quick resolution of deletion reconciliation inside of a single project without requiring a recalculation across all projects. This can be used to pull the deltas for a given project.", +} + +func (ClusterResourceQuotaStatus) SwaggerDoc() map[string]string { + return map_ClusterResourceQuotaStatus +} + +var map_ResourceQuotaStatusByNamespace = map[string]string{ + "": "ResourceQuotaStatusByNamespace gives status for a particular project", + "namespace": "Namespace the project this status applies to", + "status": "Status indicates how many resources have been consumed by this project", +} + +func (ResourceQuotaStatusByNamespace) SwaggerDoc() map[string]string { + return map_ResourceQuotaStatusByNamespace +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/route/.codegen.yaml b/vendor/github.com/openshift/api/route/.codegen.yaml new file mode 100644 index 000000000..d2791f7b5 --- /dev/null +++ b/vendor/github.com/openshift/api/route/.codegen.yaml @@ -0,0 +1,8 @@ +schemapatch: + requiredFeatureSets: + - '' + - 'Default' + - 'TechPreviewNoUpgrade' + - 'CustomNoUpgrade' +swaggerdocs: + commentPolicy: Warn diff --git a/vendor/github.com/openshift/api/route/OWNERS b/vendor/github.com/openshift/api/route/OWNERS new file mode 100644 index 000000000..74038975d --- /dev/null +++ b/vendor/github.com/openshift/api/route/OWNERS @@ -0,0 +1,5 @@ +reviewers: + - ironcladlou + - knobunc + - pravisankar + - Miciah diff --git a/vendor/github.com/openshift/api/route/install.go b/vendor/github.com/openshift/api/route/install.go new file mode 100644 index 000000000..a08536283 --- /dev/null +++ b/vendor/github.com/openshift/api/route/install.go @@ -0,0 +1,26 @@ +package route + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + routev1 "github.com/openshift/api/route/v1" +) + +const ( + GroupName = "route.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(routev1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git 
a/vendor/github.com/openshift/api/route/v1/Makefile b/vendor/github.com/openshift/api/route/v1/Makefile new file mode 100644 index 000000000..0e6057620 --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="route.openshift.io/v1" diff --git a/vendor/github.com/openshift/api/route/v1/custom.route.testsuite.yaml b/vendor/github.com/openshift/api/route/v1/custom.route.testsuite.yaml new file mode 100644 index 000000000..4a8042fc1 --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/custom.route.testsuite.yaml @@ -0,0 +1,103 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: '[CustomNoUpgrade] Route' +crd: route-CustomNoUpgrade.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal Route + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + spec: + to: + kind: Service + name: foo + expected: | + apiVersion: route.openshift.io/v1 + kind: Route + spec: + to: + kind: Service + name: foo + weight: 100 + wildcardPolicy: None + - name: 'cannot have both spec.tls.termination: passthrough and spec.tls.insecureEdgeTerminationPolicy: Allow' + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + spec: + to: + kind: Service + name: foo + tls: + termination: passthrough + insecureEdgeTerminationPolicy: Allow + expectedError: 'cannot have both spec.tls.termination: passthrough and spec.tls.insecureEdgeTerminationPolicy: Allow' + - name: 'spec.tls.termination: passthrough is compatible with spec.tls.insecureEdgeTerminationPolicy: Redirect' + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + spec: + host: test.foo + to: + kind: Service + name: foo + tls: + termination: passthrough + insecureEdgeTerminationPolicy: Redirect + expected: | + apiVersion: route.openshift.io/v1 + kind: Route + spec: + host: test.foo + to: + kind: Service + name: foo + weight: 100 + tls: + termination: passthrough + insecureEdgeTerminationPolicy: Redirect + wildcardPolicy: None + - name: 'spec.tls.termination: passthrough is compatible with spec.tls.insecureEdgeTerminationPolicy: None' + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + spec: + host: test.foo + to: + kind: Service + name: foo + tls: + termination: passthrough + insecureEdgeTerminationPolicy: None + expected: | + apiVersion: route.openshift.io/v1 + kind: Route + spec: + host: test.foo + to: + kind: Service + name: foo + weight: 100 + tls: + termination: passthrough + insecureEdgeTerminationPolicy: None + wildcardPolicy: None + - name: 'cannot have both spec.tls.certificate and spec.tls.externalCertificate' + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + spec: + to: + kind: Service + name: foo + tls: + termination: edge + key: |- + -----BEGIN RSA PRIVATE KEY----- + -----END RSA PRIVATE KEY----- + certificate: |- + -----BEGIN CERTIFICATE----- + -----END CERTIFICATE----- + externalCertificate: + name: "my-local-secret" + expectedError: 'Invalid value: "object": cannot have both spec.tls.certificate and spec.tls.externalCertificate' diff --git a/vendor/github.com/openshift/api/route/v1/doc.go b/vendor/github.com/openshift/api/route/v1/doc.go new file mode 100644 index 000000000..e56fbbd8d --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/route/apis/route +// +k8s:defaulter-gen=TypeMeta +// 
+k8s:openapi-gen=true + +// +groupName=route.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/route/v1/generated.pb.go b/vendor/github.com/openshift/api/route/v1/generated.pb.go new file mode 100644 index 000000000..2adcd1cc8 --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/generated.pb.go @@ -0,0 +1,4276 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/route/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } +func (*LocalObjectReference) ProtoMessage() {} +func (*LocalObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{0} +} +func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LocalObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LocalObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalObjectReference.Merge(m, src) +} +func (m *LocalObjectReference) XXX_Size() int { + return m.Size() +} +func (m *LocalObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_LocalObjectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo + +func (m *Route) Reset() { *m = Route{} } +func (*Route) ProtoMessage() {} +func (*Route) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{1} +} +func (m *Route) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Route) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Route) XXX_Merge(src proto.Message) { + xxx_messageInfo_Route.Merge(m, src) +} +func (m *Route) XXX_Size() int { + return m.Size() +} +func (m *Route) XXX_DiscardUnknown() { + xxx_messageInfo_Route.DiscardUnknown(m) +} + +var xxx_messageInfo_Route proto.InternalMessageInfo + +func (m *RouteHTTPHeader) Reset() { *m = RouteHTTPHeader{} } +func (*RouteHTTPHeader) ProtoMessage() {} +func (*RouteHTTPHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{2} +} +func (m *RouteHTTPHeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteHTTPHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouteHTTPHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteHTTPHeader.Merge(m, src) +} +func (m *RouteHTTPHeader) XXX_Size() int { + return 
m.Size() +} +func (m *RouteHTTPHeader) XXX_DiscardUnknown() { + xxx_messageInfo_RouteHTTPHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteHTTPHeader proto.InternalMessageInfo + +func (m *RouteHTTPHeaderActionUnion) Reset() { *m = RouteHTTPHeaderActionUnion{} } +func (*RouteHTTPHeaderActionUnion) ProtoMessage() {} +func (*RouteHTTPHeaderActionUnion) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{3} +} +func (m *RouteHTTPHeaderActionUnion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteHTTPHeaderActionUnion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouteHTTPHeaderActionUnion) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteHTTPHeaderActionUnion.Merge(m, src) +} +func (m *RouteHTTPHeaderActionUnion) XXX_Size() int { + return m.Size() +} +func (m *RouteHTTPHeaderActionUnion) XXX_DiscardUnknown() { + xxx_messageInfo_RouteHTTPHeaderActionUnion.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteHTTPHeaderActionUnion proto.InternalMessageInfo + +func (m *RouteHTTPHeaderActions) Reset() { *m = RouteHTTPHeaderActions{} } +func (*RouteHTTPHeaderActions) ProtoMessage() {} +func (*RouteHTTPHeaderActions) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{4} +} +func (m *RouteHTTPHeaderActions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteHTTPHeaderActions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouteHTTPHeaderActions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteHTTPHeaderActions.Merge(m, src) +} +func (m *RouteHTTPHeaderActions) XXX_Size() int { + return m.Size() +} +func (m *RouteHTTPHeaderActions) XXX_DiscardUnknown() { + xxx_messageInfo_RouteHTTPHeaderActions.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteHTTPHeaderActions proto.InternalMessageInfo + +func (m *RouteHTTPHeaders) Reset() { *m = RouteHTTPHeaders{} } +func (*RouteHTTPHeaders) ProtoMessage() {} +func (*RouteHTTPHeaders) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{5} +} +func (m *RouteHTTPHeaders) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteHTTPHeaders) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouteHTTPHeaders) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteHTTPHeaders.Merge(m, src) +} +func (m *RouteHTTPHeaders) XXX_Size() int { + return m.Size() +} +func (m *RouteHTTPHeaders) XXX_DiscardUnknown() { + xxx_messageInfo_RouteHTTPHeaders.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteHTTPHeaders proto.InternalMessageInfo + +func (m *RouteIngress) Reset() { *m = RouteIngress{} } +func (*RouteIngress) ProtoMessage() {} +func (*RouteIngress) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{6} +} +func (m *RouteIngress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteIngress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouteIngress) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteIngress.Merge(m, 
src) +} +func (m *RouteIngress) XXX_Size() int { + return m.Size() +} +func (m *RouteIngress) XXX_DiscardUnknown() { + xxx_messageInfo_RouteIngress.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteIngress proto.InternalMessageInfo + +func (m *RouteIngressCondition) Reset() { *m = RouteIngressCondition{} } +func (*RouteIngressCondition) ProtoMessage() {} +func (*RouteIngressCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{7} +} +func (m *RouteIngressCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteIngressCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouteIngressCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteIngressCondition.Merge(m, src) +} +func (m *RouteIngressCondition) XXX_Size() int { + return m.Size() +} +func (m *RouteIngressCondition) XXX_DiscardUnknown() { + xxx_messageInfo_RouteIngressCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteIngressCondition proto.InternalMessageInfo + +func (m *RouteList) Reset() { *m = RouteList{} } +func (*RouteList) ProtoMessage() {} +func (*RouteList) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{8} +} +func (m *RouteList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouteList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteList.Merge(m, src) +} +func (m *RouteList) XXX_Size() int { + return m.Size() +} +func (m *RouteList) XXX_DiscardUnknown() { + xxx_messageInfo_RouteList.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteList proto.InternalMessageInfo + +func (m *RoutePort) Reset() { *m = RoutePort{} } +func (*RoutePort) ProtoMessage() {} +func (*RoutePort) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{9} +} +func (m *RoutePort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoutePort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoutePort) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoutePort.Merge(m, src) +} +func (m *RoutePort) XXX_Size() int { + return m.Size() +} +func (m *RoutePort) XXX_DiscardUnknown() { + xxx_messageInfo_RoutePort.DiscardUnknown(m) +} + +var xxx_messageInfo_RoutePort proto.InternalMessageInfo + +func (m *RouteSetHTTPHeader) Reset() { *m = RouteSetHTTPHeader{} } +func (*RouteSetHTTPHeader) ProtoMessage() {} +func (*RouteSetHTTPHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{10} +} +func (m *RouteSetHTTPHeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteSetHTTPHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouteSetHTTPHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteSetHTTPHeader.Merge(m, src) +} +func (m *RouteSetHTTPHeader) XXX_Size() int { + return m.Size() +} +func (m *RouteSetHTTPHeader) XXX_DiscardUnknown() { + xxx_messageInfo_RouteSetHTTPHeader.DiscardUnknown(m) +} + +var 
xxx_messageInfo_RouteSetHTTPHeader proto.InternalMessageInfo + +func (m *RouteSpec) Reset() { *m = RouteSpec{} } +func (*RouteSpec) ProtoMessage() {} +func (*RouteSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{11} +} +func (m *RouteSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouteSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteSpec.Merge(m, src) +} +func (m *RouteSpec) XXX_Size() int { + return m.Size() +} +func (m *RouteSpec) XXX_DiscardUnknown() { + xxx_messageInfo_RouteSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteSpec proto.InternalMessageInfo + +func (m *RouteStatus) Reset() { *m = RouteStatus{} } +func (*RouteStatus) ProtoMessage() {} +func (*RouteStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{12} +} +func (m *RouteStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouteStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteStatus.Merge(m, src) +} +func (m *RouteStatus) XXX_Size() int { + return m.Size() +} +func (m *RouteStatus) XXX_DiscardUnknown() { + xxx_messageInfo_RouteStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteStatus proto.InternalMessageInfo + +func (m *RouteTargetReference) Reset() { *m = RouteTargetReference{} } +func (*RouteTargetReference) ProtoMessage() {} +func (*RouteTargetReference) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{13} +} +func (m *RouteTargetReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouteTargetReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouteTargetReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouteTargetReference.Merge(m, src) +} +func (m *RouteTargetReference) XXX_Size() int { + return m.Size() +} +func (m *RouteTargetReference) XXX_DiscardUnknown() { + xxx_messageInfo_RouteTargetReference.DiscardUnknown(m) +} + +var xxx_messageInfo_RouteTargetReference proto.InternalMessageInfo + +func (m *RouterShard) Reset() { *m = RouterShard{} } +func (*RouterShard) ProtoMessage() {} +func (*RouterShard) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, []int{14} +} +func (m *RouterShard) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RouterShard) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RouterShard) XXX_Merge(src proto.Message) { + xxx_messageInfo_RouterShard.Merge(m, src) +} +func (m *RouterShard) XXX_Size() int { + return m.Size() +} +func (m *RouterShard) XXX_DiscardUnknown() { + xxx_messageInfo_RouterShard.DiscardUnknown(m) +} + +var xxx_messageInfo_RouterShard proto.InternalMessageInfo + +func (m *TLSConfig) Reset() { *m = TLSConfig{} } +func (*TLSConfig) ProtoMessage() {} +func (*TLSConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_373b8fa7ff738721, 
[]int{15} +} +func (m *TLSConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TLSConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TLSConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_TLSConfig.Merge(m, src) +} +func (m *TLSConfig) XXX_Size() int { + return m.Size() +} +func (m *TLSConfig) XXX_DiscardUnknown() { + xxx_messageInfo_TLSConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_TLSConfig proto.InternalMessageInfo + +func init() { + proto.RegisterType((*LocalObjectReference)(nil), "github.com.openshift.api.route.v1.LocalObjectReference") + proto.RegisterType((*Route)(nil), "github.com.openshift.api.route.v1.Route") + proto.RegisterType((*RouteHTTPHeader)(nil), "github.com.openshift.api.route.v1.RouteHTTPHeader") + proto.RegisterType((*RouteHTTPHeaderActionUnion)(nil), "github.com.openshift.api.route.v1.RouteHTTPHeaderActionUnion") + proto.RegisterType((*RouteHTTPHeaderActions)(nil), "github.com.openshift.api.route.v1.RouteHTTPHeaderActions") + proto.RegisterType((*RouteHTTPHeaders)(nil), "github.com.openshift.api.route.v1.RouteHTTPHeaders") + proto.RegisterType((*RouteIngress)(nil), "github.com.openshift.api.route.v1.RouteIngress") + proto.RegisterType((*RouteIngressCondition)(nil), "github.com.openshift.api.route.v1.RouteIngressCondition") + proto.RegisterType((*RouteList)(nil), "github.com.openshift.api.route.v1.RouteList") + proto.RegisterType((*RoutePort)(nil), "github.com.openshift.api.route.v1.RoutePort") + proto.RegisterType((*RouteSetHTTPHeader)(nil), "github.com.openshift.api.route.v1.RouteSetHTTPHeader") + proto.RegisterType((*RouteSpec)(nil), "github.com.openshift.api.route.v1.RouteSpec") + proto.RegisterType((*RouteStatus)(nil), "github.com.openshift.api.route.v1.RouteStatus") + proto.RegisterType((*RouteTargetReference)(nil), "github.com.openshift.api.route.v1.RouteTargetReference") + proto.RegisterType((*RouterShard)(nil), "github.com.openshift.api.route.v1.RouterShard") + proto.RegisterType((*TLSConfig)(nil), "github.com.openshift.api.route.v1.TLSConfig") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/route/v1/generated.proto", fileDescriptor_373b8fa7ff738721) +} + +var fileDescriptor_373b8fa7ff738721 = []byte{ + // 1420 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x58, 0xdd, 0x6e, 0x13, 0xc7, + 0x17, 0xcf, 0xc6, 0x76, 0x1c, 0x8f, 0xf9, 0x1c, 0xbe, 0x4c, 0x24, 0x6c, 0xd8, 0xbf, 0xf4, 0x17, + 0x54, 0x74, 0xdd, 0x04, 0x68, 0x41, 0x15, 0x17, 0x6c, 0x40, 0x10, 0x30, 0x21, 0x1a, 0xbb, 0xa0, + 0x22, 0x2a, 0x75, 0xb2, 0x3b, 0xb6, 0xa7, 0xb1, 0x67, 0x97, 0x99, 0x71, 0x20, 0x37, 0x15, 0x6a, + 0x5f, 0x80, 0xde, 0xf6, 0x15, 0xaa, 0xde, 0xf7, 0x11, 0xb8, 0xe4, 0x92, 0xde, 0x58, 0x8d, 0x7b, + 0xd9, 0x37, 0xc8, 0x55, 0x35, 0xb3, 0xe3, 0xdd, 0xb5, 0x63, 0x13, 0x07, 0xf5, 0xce, 0x7b, 0xce, + 0xf9, 0xfd, 0xce, 0xc7, 0x9c, 0x39, 0x67, 0x12, 0xb0, 0xdc, 0xa2, 0xb2, 0xdd, 0xdb, 0x74, 0xbc, + 0xa0, 0x5b, 0x0d, 0x42, 0xc2, 0x44, 0x9b, 0x36, 0x65, 0x15, 0x87, 0xb4, 0xca, 0x83, 0x9e, 0x24, + 0xd5, 0xed, 0xe5, 0x6a, 0x8b, 0x30, 0xc2, 0xb1, 0x24, 0xbe, 0x13, 0xf2, 0x40, 0x06, 0xf0, 0x52, + 0x02, 0x71, 0x62, 0x88, 0x83, 0x43, 0xea, 0x68, 0x88, 0xb3, 0xbd, 0xbc, 0xf4, 0x79, 0x8a, 0xb5, + 0x15, 0xb4, 0x82, 0xaa, 0x46, 0x6e, 0xf6, 0x9a, 0xfa, 0x4b, 0x7f, 0xe8, 0x5f, 0x11, 0xe3, 0x92, + 0xbd, 0x75, 0x53, 0x38, 0x34, 0xd0, 0x6e, 0xbd, 0x80, 0x4f, 
0xf2, 0xba, 0x74, 0x3d, 0xb1, 0xe9, + 0x62, 0xaf, 0x4d, 0x19, 0xe1, 0x3b, 0xd5, 0x70, 0xab, 0xa5, 0x04, 0xa2, 0xda, 0x25, 0x12, 0x4f, + 0x42, 0x7d, 0x39, 0x0d, 0xc5, 0x7b, 0x4c, 0xd2, 0x2e, 0xa9, 0x0a, 0xaf, 0x4d, 0xba, 0x78, 0x1f, + 0xee, 0xda, 0x34, 0x5c, 0x4f, 0xd2, 0x4e, 0x95, 0x32, 0x29, 0x24, 0x1f, 0x07, 0xd9, 0x37, 0xc1, + 0xe9, 0x5a, 0xe0, 0xe1, 0xce, 0x93, 0xcd, 0x1f, 0x88, 0x27, 0x11, 0x69, 0x12, 0x4e, 0x98, 0x47, + 0xe0, 0x45, 0x90, 0x65, 0xb8, 0x4b, 0x4a, 0xd6, 0x45, 0xeb, 0x72, 0xc1, 0x3d, 0xf2, 0xae, 0x5f, + 0x99, 0x1b, 0xf4, 0x2b, 0xd9, 0x75, 0xdc, 0x25, 0x48, 0x6b, 0xec, 0x5f, 0xe6, 0x41, 0x0e, 0xa9, + 0xe2, 0xc1, 0xef, 0xc1, 0xa2, 0xca, 0xc5, 0xc7, 0x12, 0x6b, 0xfb, 0xe2, 0xca, 0x17, 0x4e, 0x14, + 0x8b, 0x93, 0x8e, 0xc5, 0x09, 0xb7, 0x5a, 0x4a, 0x20, 0x1c, 0x65, 0xed, 0x6c, 0x2f, 0x3b, 0x91, + 0xd3, 0xc7, 0x44, 0x62, 0x17, 0x1a, 0x0f, 0x20, 0x91, 0xa1, 0x98, 0x15, 0xae, 0x83, 0xac, 0x08, + 0x89, 0x57, 0x9a, 0xd7, 0xec, 0x57, 0x9d, 0x03, 0x4f, 0xd3, 0xd1, 0x91, 0xd5, 0x43, 0xe2, 0x25, + 0xb1, 0xab, 0x2f, 0xa4, 0x79, 0xe0, 0x53, 0xb0, 0x20, 0x24, 0x96, 0x3d, 0x51, 0xca, 0x68, 0x46, + 0x67, 0x66, 0x46, 0x8d, 0x72, 0x8f, 0x19, 0xce, 0x85, 0xe8, 0x1b, 0x19, 0x36, 0xfb, 0x57, 0x0b, + 0x1c, 0xd7, 0x76, 0x0f, 0x1a, 0x8d, 0x8d, 0x07, 0x04, 0xfb, 0x84, 0x1f, 0x5c, 0x49, 0x48, 0xc0, + 0x02, 0xf6, 0x24, 0x0d, 0x98, 0xc9, 0xef, 0xf6, 0xac, 0xd1, 0x24, 0x5e, 0xee, 0x68, 0xfc, 0x37, + 0x8c, 0x06, 0x2c, 0x09, 0x2e, 0x12, 0x22, 0x43, 0x6e, 0xff, 0x6e, 0x81, 0xa5, 0xe9, 0x30, 0x78, + 0x1b, 0x64, 0xe5, 0x4e, 0x38, 0x8c, 0xf3, 0xca, 0x30, 0xce, 0xc6, 0x4e, 0x48, 0xf6, 0xfa, 0x95, + 0xf3, 0x13, 0x91, 0x4a, 0x89, 0x34, 0x0c, 0x6e, 0x80, 0x8c, 0x20, 0xd2, 0x64, 0x70, 0x63, 0xe6, + 0x7a, 0x12, 0x99, 0x70, 0xba, 0xf9, 0x41, 0xbf, 0x92, 0xa9, 0x13, 0x89, 0x14, 0x95, 0xfd, 0xa7, + 0x05, 0xce, 0x4e, 0xf4, 0x2a, 0x54, 0xc7, 0x71, 0x22, 0xc2, 0x80, 0x09, 0x15, 0x6f, 0xe6, 0x72, + 0x71, 0x65, 0xe5, 0xf0, 0x35, 0x73, 0x4f, 0x98, 0x1c, 0x17, 0x91, 0xe1, 0x42, 0x31, 0x2b, 0xfc, + 0x0e, 0xe4, 0x39, 0x79, 0xd9, 0x23, 0x42, 0xa5, 0xf4, 0xa9, 0x0e, 0x8e, 0x1b, 0x07, 0x79, 0x14, + 0x51, 0xa1, 0x21, 0xa7, 0xfd, 0x1a, 0x9c, 0x18, 0x33, 0x16, 0xd0, 0x07, 0xf9, 0xe8, 0xa4, 0x84, + 0xb9, 0x45, 0xb7, 0x3e, 0xb5, 0x0f, 0x44, 0xe2, 0xd9, 0x08, 0xd0, 0x90, 0xda, 0xfe, 0x39, 0x03, + 0x8e, 0x68, 0xd0, 0x1a, 0x6b, 0x71, 0x22, 0x84, 0xea, 0xcf, 0x76, 0x20, 0xe4, 0x78, 0x7f, 0x3e, + 0x08, 0x84, 0x44, 0x5a, 0x03, 0x57, 0x00, 0xd0, 0xfe, 0xb8, 0xea, 0x59, 0x7d, 0xc2, 0x85, 0xe4, + 0xbe, 0xa2, 0x58, 0x83, 0x52, 0x56, 0xb0, 0x03, 0x80, 0x17, 0x30, 0x9f, 0x46, 0xf9, 0x64, 0x74, + 0x09, 0x6f, 0xce, 0x9a, 0x8f, 0x09, 0x6d, 0x75, 0x48, 0x90, 0x78, 0x8b, 0x45, 0x02, 0xa5, 0xf8, + 0x61, 0x03, 0x1c, 0x7b, 0x45, 0x3b, 0xbe, 0x87, 0xb9, 0xbf, 0x11, 0x74, 0xa8, 0xb7, 0x53, 0xca, + 0xea, 0x28, 0xaf, 0x1a, 0xdc, 0xb1, 0x67, 0x23, 0xda, 0xbd, 0x7e, 0x05, 0x8e, 0x4a, 0x74, 0x23, + 0x8f, 0x71, 0xc0, 0x6f, 0xc1, 0xb9, 0x28, 0xa3, 0x55, 0xcc, 0x02, 0x46, 0x3d, 0xdc, 0x51, 0x45, + 0xd1, 0x97, 0x39, 0xa7, 0xe9, 0x2b, 0x86, 0xfe, 0x1c, 0x9a, 0x6c, 0x86, 0xa6, 0xe1, 0xed, 0x7f, + 0xe6, 0xc1, 0x99, 0x89, 0xa9, 0xce, 0x74, 0x0d, 0xc7, 0x41, 0xa9, 0x6b, 0x58, 0x8b, 0x27, 0x5b, + 0x74, 0x4e, 0xd7, 0x47, 0x27, 0xd5, 0x5e, 0xbf, 0x32, 0x61, 0x71, 0x39, 0x31, 0xd3, 0xe8, 0x3c, + 0x83, 0xff, 0x07, 0x0b, 0x9c, 0x60, 0x11, 0x30, 0x3d, 0x27, 0x0b, 0xc9, 0x68, 0x41, 0x5a, 0x8a, + 0x8c, 0x16, 0x5e, 0x01, 0xf9, 0x2e, 0x11, 0x02, 0xb7, 0x88, 0x29, 0x7c, 0xdc, 0x7f, 0x8f, 0x23, + 0x31, 0x1a, 0xea, 0x21, 0x07, 0xb0, 0x83, 0x85, 0x6c, 0x70, 0xcc, 0x44, 0x14, 0x3c, 
0x35, 0xf5, + 0x2c, 0xae, 0x7c, 0x36, 0xdb, 0xda, 0x50, 0x08, 0xf7, 0xec, 0xa0, 0x5f, 0x81, 0xb5, 0x7d, 0x4c, + 0x68, 0x02, 0xbb, 0xfd, 0x87, 0x05, 0x0a, 0xba, 0x70, 0x35, 0x2a, 0x24, 0x7c, 0xb1, 0x6f, 0x5d, + 0x39, 0xb3, 0xf9, 0x55, 0x68, 0xbd, 0xac, 0xe2, 0xc1, 0x31, 0x94, 0xa4, 0x56, 0xd5, 0x63, 0x90, + 0xa3, 0x92, 0x74, 0x85, 0x19, 0x1b, 0x97, 0x67, 0xed, 0x79, 0xf7, 0xa8, 0x21, 0xcd, 0xad, 0x29, + 0x38, 0x8a, 0x58, 0xec, 0x97, 0x26, 0xf2, 0x8d, 0x80, 0x4b, 0xe8, 0x03, 0x20, 0x31, 0x6f, 0x11, + 0xa9, 0xbe, 0x0e, 0x5c, 0xb5, 0x6a, 0xed, 0x3b, 0xd1, 0xda, 0x77, 0xd6, 0x98, 0x7c, 0xc2, 0xeb, + 0x92, 0x53, 0xd6, 0x4a, 0x2e, 0x53, 0x23, 0xe6, 0x42, 0x29, 0x5e, 0xfb, 0x16, 0x80, 0xfb, 0x67, + 0x33, 0xfc, 0x1f, 0xc8, 0x6d, 0xe3, 0x4e, 0x6f, 0xd8, 0x98, 0x71, 0xb4, 0x4f, 0x95, 0x10, 0x45, + 0x3a, 0xfb, 0xb7, 0x9c, 0x09, 0x57, 0xed, 0xda, 0x19, 0x26, 0x4b, 0x15, 0x14, 0x44, 0x6f, 0xd3, + 0x0f, 0xba, 0x98, 0xb2, 0xd2, 0xa2, 0x36, 0x3b, 0x69, 0xcc, 0x0a, 0xf5, 0xa1, 0x02, 0x25, 0x36, + 0x8a, 0x32, 0xc4, 0xb2, 0x6d, 0x9a, 0x3b, 0xa6, 0xdc, 0xc0, 0xb2, 0x8d, 0xb4, 0x06, 0xd6, 0xc1, + 0xbc, 0x0c, 0xcc, 0x5a, 0xff, 0x6a, 0xd6, 0xe2, 0x47, 0x95, 0x88, 0x5f, 0x3f, 0x2e, 0x30, 0xc4, + 0xf3, 0x8d, 0x00, 0xcd, 0xcb, 0x00, 0xbe, 0xb1, 0xc0, 0x49, 0xdc, 0x91, 0x84, 0x33, 0x2c, 0x89, + 0x8b, 0xbd, 0x2d, 0xc2, 0x7c, 0x51, 0xca, 0xea, 0x13, 0xfe, 0x64, 0x27, 0xe7, 0x8d, 0x93, 0x93, + 0x77, 0xc6, 0x99, 0xd1, 0x7e, 0x67, 0xf0, 0x21, 0xc8, 0x86, 0xea, 0xd4, 0x73, 0x87, 0x7b, 0x02, + 0xa9, 0x13, 0x75, 0x17, 0x75, 0x8d, 0xd4, 0x39, 0x6b, 0x0e, 0x78, 0x1f, 0x64, 0x64, 0x47, 0x94, + 0x16, 0x66, 0xa6, 0x6a, 0xd4, 0xea, 0xab, 0x01, 0x6b, 0xd2, 0x56, 0xb4, 0xa2, 0x1b, 0xb5, 0x3a, + 0x52, 0x0c, 0x13, 0xe6, 0x6e, 0xfe, 0x3f, 0x98, 0xbb, 0x4d, 0x50, 0x6c, 0x4b, 0x19, 0x9a, 0xbd, + 0x58, 0x2a, 0xe8, 0x30, 0xaf, 0x1d, 0x7e, 0x19, 0x0a, 0xf7, 0xf8, 0xa0, 0x5f, 0x29, 0xa6, 0x04, + 0x28, 0x4d, 0x6c, 0x53, 0x50, 0x4c, 0x3d, 0xea, 0xe0, 0x73, 0x90, 0xa7, 0xd1, 0x60, 0x35, 0x6f, + 0x8a, 0xea, 0x21, 0xf7, 0x55, 0x32, 0xf5, 0x8c, 0x00, 0x0d, 0x09, 0xed, 0x1f, 0xc1, 0xe9, 0x49, + 0x3d, 0xa0, 0xfa, 0x79, 0x8b, 0x32, 0x7f, 0xfc, 0x8a, 0x3c, 0xa2, 0xcc, 0x47, 0x5a, 0x13, 0x3f, + 0x1f, 0xe7, 0xa7, 0x3e, 0x1f, 0x6d, 0xb0, 0xf0, 0x8a, 0xd0, 0x56, 0x5b, 0xea, 0xae, 0xcf, 0xb9, + 0x40, 0x0d, 0xe8, 0x67, 0x5a, 0x82, 0x8c, 0xc6, 0x0e, 0x4c, 0xaa, 0xbc, 0xde, 0xc6, 0xdc, 0xd7, + 0xf7, 0x4e, 0xfd, 0x58, 0x4f, 0x1e, 0xa6, 0xc9, 0xbd, 0x1b, 0x2a, 0x50, 0x62, 0xa3, 0x00, 0x3e, + 0x13, 0xf5, 0x5e, 0xb3, 0x49, 0x5f, 0x9b, 0x50, 0x62, 0xc0, 0xdd, 0xf5, 0x7a, 0xa4, 0x40, 0x89, + 0x8d, 0xbd, 0x9b, 0x05, 0x85, 0xb8, 0x6b, 0xe0, 0x23, 0x50, 0x94, 0x84, 0x77, 0x29, 0xc3, 0xfa, + 0x99, 0x3b, 0xba, 0xdb, 0x8a, 0x8d, 0x44, 0xa5, 0x3a, 0xa4, 0x51, 0xab, 0xa7, 0x24, 0xba, 0x43, + 0xd2, 0x68, 0x78, 0x03, 0x14, 0x3d, 0xc2, 0x25, 0x6d, 0x52, 0x0f, 0xcb, 0x61, 0x61, 0x4e, 0x0d, + 0xc9, 0x56, 0x13, 0x15, 0x4a, 0xdb, 0xc1, 0x0b, 0x20, 0xb3, 0x45, 0x76, 0xcc, 0x22, 0x2b, 0x1a, + 0xf3, 0xcc, 0x23, 0xb2, 0x83, 0x94, 0x1c, 0x7e, 0x0d, 0x8e, 0x7a, 0x38, 0x05, 0x36, 0x8b, 0xec, + 0x8c, 0x31, 0x3c, 0xba, 0x7a, 0x27, 0xcd, 0x3c, 0x6a, 0x0b, 0x5f, 0x80, 0x92, 0x4f, 0x84, 0x34, + 0x11, 0x8e, 0x98, 0x9a, 0xa7, 0xc2, 0x45, 0xc3, 0x53, 0xba, 0x3b, 0xc5, 0x0e, 0x4d, 0x65, 0x80, + 0x6f, 0x2d, 0x70, 0x81, 0x32, 0x41, 0xbc, 0x1e, 0x27, 0xf7, 0xfc, 0x16, 0x49, 0x55, 0xc7, 0xdc, + 0xba, 0x05, 0xed, 0xe3, 0xa1, 0xf1, 0x71, 0x61, 0xed, 0x63, 0xc6, 0x7b, 0xfd, 0xca, 0xa5, 0x8f, + 0x1a, 0xe8, 0x8a, 0x7f, 0xdc, 0x21, 0xfc, 0xc9, 0x02, 0xa7, 0xc8, 0x6b, 0x3d, 0xa3, 0x3a, 0xe9, + 0x64, 0xf3, 
0x33, 0xcf, 0xdd, 0x49, 0x7f, 0x75, 0xba, 0xe7, 0x06, 0xfd, 0xca, 0xa9, 0x7b, 0xfb, + 0x79, 0xd1, 0x24, 0x67, 0xee, 0xfd, 0x77, 0xbb, 0xe5, 0xb9, 0xf7, 0xbb, 0xe5, 0xb9, 0x0f, 0xbb, + 0xe5, 0xb9, 0x37, 0x83, 0xb2, 0xf5, 0x6e, 0x50, 0xb6, 0xde, 0x0f, 0xca, 0xd6, 0x87, 0x41, 0xd9, + 0xfa, 0x6b, 0x50, 0xb6, 0xde, 0xfe, 0x5d, 0x9e, 0x7b, 0x7e, 0xe9, 0xc0, 0xff, 0x16, 0xfc, 0x1b, + 0x00, 0x00, 0xff, 0xff, 0x62, 0x5d, 0xac, 0x2e, 0x51, 0x10, 0x00, 0x00, +} + +func (m *LocalObjectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalObjectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LocalObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Route) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Route) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Route) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RouteHTTPHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteHTTPHeader) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteHTTPHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Action.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RouteHTTPHeaderActionUnion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteHTTPHeaderActionUnion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteHTTPHeaderActionUnion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Set != nil { + { + size, err := 
m.Set.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RouteHTTPHeaderActions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteHTTPHeaderActions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteHTTPHeaderActions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Request) > 0 { + for iNdEx := len(m.Request) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Request[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Response) > 0 { + for iNdEx := len(m.Response) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Response[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RouteHTTPHeaders) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteHTTPHeaders) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteHTTPHeaders) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Actions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RouteIngress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteIngress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.RouterCanonicalHostname) + copy(dAtA[i:], m.RouterCanonicalHostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RouterCanonicalHostname))) + i-- + dAtA[i] = 0x2a + i -= len(m.WildcardPolicy) + copy(dAtA[i:], m.WildcardPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WildcardPolicy))) + i-- + dAtA[i] = 0x22 + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.RouterName) + copy(dAtA[i:], m.RouterName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RouterName))) + i-- + dAtA[i] = 0x12 + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m 
*RouteIngressCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteIngressCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteIngressCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LastTransitionTime != nil { + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x22 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RouteList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RoutePort) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoutePort) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoutePort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.TargetPort.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RouteSetHTTPHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteSetHTTPHeader) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteSetHTTPHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0xa + 
return len(dAtA) - i, nil +} + +func (m *RouteSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.HTTPHeaders != nil { + { + size, err := m.HTTPHeaders.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + i -= len(m.Subdomain) + copy(dAtA[i:], m.Subdomain) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subdomain))) + i-- + dAtA[i] = 0x42 + i -= len(m.WildcardPolicy) + copy(dAtA[i:], m.WildcardPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WildcardPolicy))) + i-- + dAtA[i] = 0x3a + if m.TLS != nil { + { + size, err := m.TLS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.Port != nil { + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.AlternateBackends) > 0 { + for iNdEx := len(m.AlternateBackends) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AlternateBackends[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.To.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RouteStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ingress) > 0 { + for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RouteTargetReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouteTargetReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouteTargetReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Weight != nil { + i = encodeVarintGenerated(dAtA, 
i, uint64(*m.Weight)) + i-- + dAtA[i] = 0x18 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RouterShard) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RouterShard) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RouterShard) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.DNSSuffix) + copy(dAtA[i:], m.DNSSuffix) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DNSSuffix))) + i-- + dAtA[i] = 0x12 + i -= len(m.ShardName) + copy(dAtA[i:], m.ShardName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShardName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TLSConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TLSConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TLSConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ExternalCertificate != nil { + { + size, err := m.ExternalCertificate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + i -= len(m.InsecureEdgeTerminationPolicy) + copy(dAtA[i:], m.InsecureEdgeTerminationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.InsecureEdgeTerminationPolicy))) + i-- + dAtA[i] = 0x32 + i -= len(m.DestinationCACertificate) + copy(dAtA[i:], m.DestinationCACertificate) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DestinationCACertificate))) + i-- + dAtA[i] = 0x2a + i -= len(m.CACertificate) + copy(dAtA[i:], m.CACertificate) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CACertificate))) + i-- + dAtA[i] = 0x22 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x1a + i -= len(m.Certificate) + copy(dAtA[i:], m.Certificate) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Certificate))) + i-- + dAtA[i] = 0x12 + i -= len(m.Termination) + copy(dAtA[i:], m.Termination) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Termination))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *LocalObjectReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Route) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RouteHTTPHeader) Size() (n int) { + if m == nil 
{ + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Action.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RouteHTTPHeaderActionUnion) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Set != nil { + l = m.Set.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RouteHTTPHeaderActions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Response) > 0 { + for _, e := range m.Response { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Request) > 0 { + for _, e := range m.Request { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RouteHTTPHeaders) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Actions.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RouteIngress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RouterName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.WildcardPolicy) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RouterCanonicalHostname) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RouteIngressCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + if m.LastTransitionTime != nil { + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RouteList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoutePort) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.TargetPort.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RouteSetHTTPHeader) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RouteSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = m.To.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AlternateBackends) > 0 { + for _, e := range m.AlternateBackends { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Port != nil { + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TLS != nil { + l = m.TLS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.WildcardPolicy) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Subdomain) + n += 1 + l + sovGenerated(uint64(l)) + if m.HTTPHeaders != nil { + l = m.HTTPHeaders.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RouteStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RouteTargetReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Weight != nil { + n += 1 + sovGenerated(uint64(*m.Weight)) + } + return n +} + +func (m *RouterShard) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ShardName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DNSSuffix) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TLSConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Termination) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Certificate) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.CACertificate) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DestinationCACertificate) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.InsecureEdgeTerminationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.ExternalCertificate != nil { + l = m.ExternalCertificate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *LocalObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalObjectReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *Route) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Route{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "RouteSpec", "RouteSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "RouteStatus", "RouteStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RouteHTTPHeader) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RouteHTTPHeader{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Action:` + strings.Replace(strings.Replace(this.Action.String(), "RouteHTTPHeaderActionUnion", "RouteHTTPHeaderActionUnion", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RouteHTTPHeaderActionUnion) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RouteHTTPHeaderActionUnion{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Set:` + strings.Replace(this.Set.String(), "RouteSetHTTPHeader", "RouteSetHTTPHeader", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RouteHTTPHeaderActions) String() string { + if this == nil { + return "nil" + } + repeatedStringForResponse := "[]RouteHTTPHeader{" + for _, f := range this.Response { + repeatedStringForResponse += strings.Replace(strings.Replace(f.String(), "RouteHTTPHeader", "RouteHTTPHeader", 1), `&`, ``, 1) + "," + } + repeatedStringForResponse += "}" + repeatedStringForRequest := "[]RouteHTTPHeader{" + for _, f := range this.Request { + repeatedStringForRequest += strings.Replace(strings.Replace(f.String(), "RouteHTTPHeader", "RouteHTTPHeader", 1), `&`, ``, 1) + "," + } + repeatedStringForRequest += "}" + s := strings.Join([]string{`&RouteHTTPHeaderActions{`, + `Response:` + repeatedStringForResponse 
+ `,`, + `Request:` + repeatedStringForRequest + `,`, + `}`, + }, "") + return s +} +func (this *RouteHTTPHeaders) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RouteHTTPHeaders{`, + `Actions:` + strings.Replace(strings.Replace(this.Actions.String(), "RouteHTTPHeaderActions", "RouteHTTPHeaderActions", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RouteIngress) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]RouteIngressCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "RouteIngressCondition", "RouteIngressCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&RouteIngress{`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `RouterName:` + fmt.Sprintf("%v", this.RouterName) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `WildcardPolicy:` + fmt.Sprintf("%v", this.WildcardPolicy) + `,`, + `RouterCanonicalHostname:` + fmt.Sprintf("%v", this.RouterCanonicalHostname) + `,`, + `}`, + }, "") + return s +} +func (this *RouteIngressCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RouteIngressCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastTransitionTime:` + strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RouteList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Route{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Route", "Route", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&RouteList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *RoutePort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoutePort{`, + `TargetPort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetPort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RouteSetHTTPHeader) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RouteSetHTTPHeader{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *RouteSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForAlternateBackends := "[]RouteTargetReference{" + for _, f := range this.AlternateBackends { + repeatedStringForAlternateBackends += strings.Replace(strings.Replace(f.String(), "RouteTargetReference", "RouteTargetReference", 1), `&`, ``, 1) + "," + } + repeatedStringForAlternateBackends += "}" + s := strings.Join([]string{`&RouteSpec{`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `To:` + strings.Replace(strings.Replace(this.To.String(), "RouteTargetReference", "RouteTargetReference", 1), `&`, ``, 1) + `,`, + `AlternateBackends:` + repeatedStringForAlternateBackends + `,`, + `Port:` + strings.Replace(this.Port.String(), "RoutePort", "RoutePort", 1) + `,`, + `TLS:` 
+ strings.Replace(this.TLS.String(), "TLSConfig", "TLSConfig", 1) + `,`, + `WildcardPolicy:` + fmt.Sprintf("%v", this.WildcardPolicy) + `,`, + `Subdomain:` + fmt.Sprintf("%v", this.Subdomain) + `,`, + `HTTPHeaders:` + strings.Replace(this.HTTPHeaders.String(), "RouteHTTPHeaders", "RouteHTTPHeaders", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RouteStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForIngress := "[]RouteIngress{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "RouteIngress", "RouteIngress", 1), `&`, ``, 1) + "," + } + repeatedStringForIngress += "}" + s := strings.Join([]string{`&RouteStatus{`, + `Ingress:` + repeatedStringForIngress + `,`, + `}`, + }, "") + return s +} +func (this *RouteTargetReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RouteTargetReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Weight:` + valueToStringGenerated(this.Weight) + `,`, + `}`, + }, "") + return s +} +func (this *RouterShard) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RouterShard{`, + `ShardName:` + fmt.Sprintf("%v", this.ShardName) + `,`, + `DNSSuffix:` + fmt.Sprintf("%v", this.DNSSuffix) + `,`, + `}`, + }, "") + return s +} +func (this *TLSConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TLSConfig{`, + `Termination:` + fmt.Sprintf("%v", this.Termination) + `,`, + `Certificate:` + fmt.Sprintf("%v", this.Certificate) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `CACertificate:` + fmt.Sprintf("%v", this.CACertificate) + `,`, + `DestinationCACertificate:` + fmt.Sprintf("%v", this.DestinationCACertificate) + `,`, + `InsecureEdgeTerminationPolicy:` + fmt.Sprintf("%v", this.InsecureEdgeTerminationPolicy) + `,`, + `ExternalCertificate:` + strings.Replace(this.ExternalCertificate.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LocalObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LocalObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Route) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Route: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Route: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteHTTPHeader) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteHTTPHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteHTTPHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Action.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteHTTPHeaderActionUnion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteHTTPHeaderActionUnion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteHTTPHeaderActionUnion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = RouteHTTPHeaderActionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Set", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Set == nil { + m.Set = &RouteSetHTTPHeader{} + } + if err := m.Set.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteHTTPHeaderActions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteHTTPHeaderActions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteHTTPHeaderActions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Response", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Response = append(m.Response, RouteHTTPHeader{}) + if err := m.Response[len(m.Response)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Request = append(m.Request, RouteHTTPHeader{}) + if err := m.Request[len(m.Request)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + 
iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteHTTPHeaders) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteHTTPHeaders: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteHTTPHeaders: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Actions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Actions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteIngress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteIngress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteIngress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RouterName", wireType) + } + var stringLen uint64 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RouterName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, RouteIngressCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WildcardPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WildcardPolicy = WildcardPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RouterCanonicalHostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RouterCanonicalHostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteIngressCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteIngressCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteIngressCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = RouteIngressConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastTransitionTime == nil { + m.LastTransitionTime = &v1.Time{} + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Route{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoutePort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: RoutePort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoutePort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetPort", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetPort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteSetHTTPHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteSetHTTPHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteSetHTTPHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
RouteSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.To.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AlternateBackends", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AlternateBackends = append(m.AlternateBackends, RouteTargetReference{}) + if err := m.AlternateBackends[len(m.AlternateBackends)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Port == nil { + m.Port = &RoutePort{} + } + if err := 
m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TLS == nil { + m.TLS = &TLSConfig{} + } + if err := m.TLS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WildcardPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WildcardPolicy = WildcardPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subdomain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subdomain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPHeaders", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HTTPHeaders == nil { + m.HTTPHeaders = &RouteHTTPHeaders{} + } + if err := m.HTTPHeaders.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ingress = append(m.Ingress, RouteIngress{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouteTargetReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouteTargetReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouteTargetReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Weight = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RouterShard) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RouterShard: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RouterShard: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShardName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShardName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSSuffix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DNSSuffix = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TLSConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 
0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TLSConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TLSConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Termination", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Termination = TLSTerminationType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Certificate = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CACertificate", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CACertificate = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DestinationCACertificate", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DestinationCACertificate = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InsecureEdgeTerminationPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InsecureEdgeTerminationPolicy = InsecureEdgeTerminationPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalCertificate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExternalCertificate == nil { + m.ExternalCertificate = &LocalObjectReference{} + } + if err := m.ExternalCertificate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, 
io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/openshift/api/route/v1/generated.proto b/vendor/github.com/openshift/api/route/v1/generated.proto new file mode 100644 index 000000000..d31fa5222 --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/generated.proto @@ -0,0 +1,456 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package github.com.openshift.api.route.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/openshift/api/route/v1"; + +// LocalObjectReference contains enough information to let you locate the +// referenced object inside the same namespace. +// +structType=atomic +message LocalObjectReference { + // name of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + // +optional + optional string name = 1; +} + +// A route allows developers to expose services through an HTTP(S) aware load balancing and proxy +// layer via a public DNS entry. The route may further specify TLS options and a certificate, or +// specify a public CNAME that the router should also accept for HTTP and HTTPS traffic. An +// administrator typically configures their router to be visible outside the cluster firewall, and +// may also add additional security, caching, or traffic controls on the service content. Routers +// usually talk directly to the service endpoints. +// +// Once a route is created, the `host` field may not be changed. Generally, routers use the oldest +// route with a given host when resolving conflicts. +// +// Routers are subject to additional customization and may support additional controls via the +// annotations field. +// +// Because administrators may configure multiple routers, the route status field is used to +// return information to clients about the names and states of the route under each router. +// If a client chooses a duplicate name, for instance, the route status conditions are used +// to indicate the route cannot be chosen. +// +// To enable HTTP/2 ALPN on a route it requires a custom +// (non-wildcard) certificate. This prevents connection coalescing by +// clients, notably web browsers. We do not support HTTP/2 ALPN on +// routes that use the default certificate because of the risk of +// connection re-use/coalescing. Routes that do not have their own +// custom certificate will not be HTTP/2 ALPN-enabled on either the +// frontend or the backend. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message Route { + // metadata is the standard object's metadata. 
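Aside: every Unmarshal method in the generated file above repeats the same base-128 varint loop to decode field keys and lengths — each byte contributes its low 7 bits, and the high bit marks continuation. A standalone sketch of that loop (a re-statement for readers unfamiliar with the wire format, not part of the vendored file):

  package main

  import (
  	"errors"
  	"fmt"
  )

  // readUvarint decodes one protobuf varint from data, returning the
  // value and the number of bytes consumed. The generated code guards
  // against overflow with a shift>=64 check; capping at 10 bytes (the
  // longest encoding of a uint64) is equivalent.
  func readUvarint(data []byte) (uint64, int, error) {
  	var v uint64
  	for i := 0; i < len(data); i++ {
  		if i >= 10 {
  			return 0, 0, errors.New("varint overflows a 64-bit integer")
  		}
  		b := data[i]
  		v |= uint64(b&0x7F) << (7 * uint(i))
  		if b < 0x80 {
  			return v, i + 1, nil
  		}
  	}
  	return 0, 0, errors.New("unexpected EOF")
  }

  func main() {
  	// 0x96 0x01 is the canonical varint encoding of 150.
  	v, n, _ := readUvarint([]byte{0x96, 0x01})
  	fmt.Println(v, n) // 150 2

  	// A field key packs (fieldNum << 3) | wireType, unpacked exactly as
  	// the generated code does. 0x0A is field 1, wire type 2
  	// (length-delimited) — e.g. RouteSpec.host.
  	key, _, _ := readUvarint([]byte{0x0A})
  	fmt.Println(key>>3, key&0x7) // 1 2
  }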
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the desired state of the route + // +kubebuilder:validation:XValidation:rule="!has(self.tls) || self.tls.termination != 'passthrough' || !has(self.httpHeaders)",message="header actions are not permitted when tls termination is passthrough." + optional RouteSpec spec = 2; + + // status is the current state of the route + // +optional + optional RouteStatus status = 3; +} + +// RouteHTTPHeader specifies configuration for setting or deleting an HTTP header. +message RouteHTTPHeader { + // name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header + // name as defined in RFC 2616 section 4.2. + // The name must consist only of alphanumeric and the following special characters, "-!#$%&'*+.^_`". + // The following header names are reserved and may not be modified via this API: + // Strict-Transport-Security, Proxy, Cookie, Set-Cookie. + // It must be no more than 255 characters in length. + // Header name must be unique. + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=255 + // +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$" + // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'strict-transport-security'",message="strict-transport-security header may not be modified via header actions" + // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'proxy'",message="proxy header may not be modified via header actions" + // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'cookie'",message="cookie header may not be modified via header actions" + // +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'set-cookie'",message="set-cookie header may not be modified via header actions" + optional string name = 1; + + // action specifies actions to perform on headers, such as setting or deleting headers. + // +kubebuilder:validation:Required + optional RouteHTTPHeaderActionUnion action = 2; +} + +// RouteHTTPHeaderActionUnion specifies an action to take on an HTTP header. +// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Set' ? has(self.set) : !has(self.set)",message="set is required when type is Set, and forbidden otherwise" +// +union +message RouteHTTPHeaderActionUnion { + // type defines the type of the action to be applied on the header. + // Possible values are Set or Delete. + // Set allows you to set HTTP request and response headers. + // Delete allows you to delete HTTP request and response headers. + // +unionDiscriminator + // +kubebuilder:validation:Enum:=Set;Delete + // +kubebuilder:validation:Required + optional string type = 1; + + // set defines the HTTP header that should be set: added if it doesn't exist or replaced if it does. + // This field is required when type is Set and forbidden otherwise. + // +optional + // +unionMember + optional RouteSetHTTPHeader set = 2; +} + +// RouteHTTPHeaderActions defines configuration for actions on HTTP request and response headers. +message RouteHTTPHeaderActions { + // response is a list of HTTP response headers to modify. + // Currently, actions may define to either `Set` or `Delete` headers values. + // Actions defined here will modify the response headers of all requests made through a route. 
+ // These actions are applied to a specific Route defined within a cluster i.e. connections made through a route. + // Route actions will be executed before IngressController actions for response headers. + // Actions are applied in sequence as defined in this list. + // A maximum of 20 response header actions may be configured. + // You can use this field to specify HTTP response headers that should be set or deleted + // when forwarding responses from your application to the client. + // Sample fetchers allowed are "res.hdr" and "ssl_c_der". + // Converters allowed are "lower" and "base64". + // Example header values: "%[res.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]". + // Note: This field cannot be used if your route uses TLS passthrough. + // + --- + // + Note: Any change to regex mentioned below must be reflected in the CRD validation of route in https://github.com/openshift/library-go/blob/master/pkg/route/validation/validation.go and vice-versa. + // +listType=map + // +listMapKey=name + // +optional + // +kubebuilder:validation:MaxItems=20 + // +kubebuilder:validation:XValidation:rule=`self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:res\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$')))`,message="Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are res.hdr, ssl_c_der. Converters allowed are lower, base64." + repeated RouteHTTPHeader response = 1; + + // request is a list of HTTP request headers to modify. + // Currently, actions may define to either `Set` or `Delete` headers values. + // Actions defined here will modify the request headers of all requests made through a route. + // These actions are applied to a specific Route defined within a cluster i.e. connections made through a route. + // Currently, actions may define to either `Set` or `Delete` headers values. + // Route actions will be executed after IngressController actions for request headers. + // Actions are applied in sequence as defined in this list. + // A maximum of 20 request header actions may be configured. + // You can use this field to specify HTTP request headers that should be set or deleted + // when forwarding connections from the client to your application. + // Sample fetchers allowed are "req.hdr" and "ssl_c_der". + // Converters allowed are "lower" and "base64". + // Example header values: "%[req.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]". + // Any request header configuration applied directly via a Route resource using this API + // will override header configuration for a header of the same name applied via + // spec.httpHeaders.actions on the IngressController or route annotation. + // Note: This field cannot be used if your route uses TLS passthrough. + // + --- + // + Note: Any change to regex mentioned below must be reflected in the CRD validation of route in https://github.com/openshift/library-go/blob/master/pkg/route/validation/validation.go and vice-versa. 
+ // +listType=map + // +listMapKey=name + // +optional + // +kubebuilder:validation:MaxItems=20 + // +kubebuilder:validation:XValidation:rule=`self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:req\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$')))`,message="Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are req.hdr, ssl_c_der. Converters allowed are lower, base64." + repeated RouteHTTPHeader request = 2; +} + +// RouteHTTPHeaders defines policy for HTTP headers. +message RouteHTTPHeaders { + // actions specifies options for modifying headers and their values. + // Note that this option only applies to cleartext HTTP connections + // and to secure HTTP connections for which the ingress controller + // terminates encryption (that is, edge-terminated or reencrypt + // connections). Headers cannot be modified for TLS passthrough + // connections. + // Setting the HSTS (`Strict-Transport-Security`) header is not supported via actions. + // `Strict-Transport-Security` may only be configured using the "haproxy.router.openshift.io/hsts_header" + // route annotation, and only in accordance with the policy specified in Ingress.Spec.RequiredHSTSPolicies. + // In case of HTTP request headers, the actions specified in spec.httpHeaders.actions on the Route will be executed after + // the actions specified in the IngressController's spec.httpHeaders.actions field. + // In case of HTTP response headers, the actions specified in spec.httpHeaders.actions on the IngressController will be + // executed after the actions specified in the Route's spec.httpHeaders.actions field. + // The headers set via this API will not appear in access logs. + // Any actions defined here are applied after any actions related to the following other fields: + // cache-control, spec.clientTLS, + // spec.httpHeaders.forwardedHeaderPolicy, spec.httpHeaders.uniqueId, + // and spec.httpHeaders.headerNameCaseAdjustments. + // The following header names are reserved and may not be modified via this API: + // Strict-Transport-Security, Proxy, Cookie, Set-Cookie. + // Note that the total size of all net added headers *after* interpolating dynamic values + // must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the + // IngressController. Please refer to the documentation + // for that API field for more details. + // +optional + optional RouteHTTPHeaderActions actions = 1; +} + +// RouteIngress holds information about the places where a route is exposed. +message RouteIngress { + // Host is the host string under which the route is exposed; this value is required + optional string host = 1; + + // Name is a name chosen by the router to identify itself; this value is required + optional string routerName = 2; + + // Conditions is the state of the route, may be empty. + repeated RouteIngressCondition conditions = 3; + + // Wildcard policy is the wildcard policy that was allowed where this route is exposed. 
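For illustration, here is how the httpHeaders policy described above is expressed with the Go types from this vendored routev1 package. The header names are hypothetical, but the request value uses only fetchers and converters the validation allows (ssl_c_der, base64):

  package main

  import (
  	"fmt"

  	routev1 "github.com/openshift/api/route/v1"
  )

  func main() {
  	headers := &routev1.RouteHTTPHeaders{
  		Actions: routev1.RouteHTTPHeaderActions{
  			// Request actions run after the IngressController's request
  			// actions and override same-named header configuration there.
  			Request: []routev1.RouteHTTPHeader{{
  				Name: "X-SSL-Client-Der", // hypothetical header name
  				Action: routev1.RouteHTTPHeaderActionUnion{
  					Type: "Set", // "set" must be present exactly when type is Set
  					Set:  &routev1.RouteSetHTTPHeader{Value: "%{+Q}[ssl_c_der,base64]"},
  				},
  			}},
  			// Response actions run before the IngressController's
  			// response actions.
  			Response: []routev1.RouteHTTPHeader{{
  				Name:   "Server",
  				Action: routev1.RouteHTTPHeaderActionUnion{Type: "Delete"},
  			}},
  		},
  	}
  	fmt.Printf("%+v\n", headers.Actions)
  }

Note that, per the CEL rules above, none of this may be set on a route whose tls.termination is passthrough.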
+ optional string wildcardPolicy = 4; + + // CanonicalHostname is the external host name for the router that can be used as a CNAME + // for the host requested for this route. This value is optional and may not be set in all cases. + optional string routerCanonicalHostname = 5; +} + +// RouteIngressCondition contains details for the current condition of this route on a particular +// router. +message RouteIngressCondition { + // Type is the type of the condition. + // Currently only Admitted. + optional string type = 1; + + // Status is the status of the condition. + // Can be True, False, Unknown. + optional string status = 2; + + // (brief) reason for the condition's last transition, and is usually a machine and human + // readable constant + optional string reason = 3; + + // Human readable message indicating details about last transition. + optional string message = 4; + + // RFC 3339 date and time when this condition last transitioned + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 5; +} + +// RouteList is a collection of Routes. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message RouteList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of routes + repeated Route items = 2; +} + +// RoutePort defines a port mapping from a router to an endpoint in the service endpoints. +message RoutePort { + // The target port on pods selected by the service this route points to. + // If this is a string, it will be looked up as a named port in the target + // endpoints port list. Required + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString targetPort = 1; +} + +// RouteSetHTTPHeader specifies what value needs to be set on an HTTP header. +message RouteSetHTTPHeader { + // value specifies a header value. + // Dynamic values can be added. The value will be interpreted as an HAProxy format string as defined in + // http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and + // otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. + // The value of this field must be no more than 16384 characters in length. + // Note that the total size of all net added headers *after* interpolating dynamic values + // must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the + // IngressController. + // + --- + // + Note: This limit was selected as most common web servers have a limit of 16384 characters or some lower limit. + // + See . + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=16384 + optional string value = 1; +} + +// RouteSpec describes the hostname or path the route exposes, any security information, +// and one to four backends (services) the route points to. Requests are distributed +// among the backends depending on the weights assigned to each backend. When using +// roundrobin scheduling the portion of requests that go to each backend is the backend +// weight divided by the sum of all of the backend weights. 
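A quick worked example of the weight arithmetic described below for RouteSpec: with the primary backend at weight 100 and a single alternate at weight 50, the primary receives 100/150 = 2/3 of requests and the alternate 1/3. A sketch in Go (service names hypothetical):

  package main

  import "fmt"

  func main() {
  	weights := map[string]int32{"frontend-v1": 100, "frontend-v2": 50}
  	var sum int32
  	for _, w := range weights {
  		sum += w
  	}
  	for name, w := range weights {
  		// Share of requests = backend weight / sum of all backend weights.
  		fmt.Printf("%s gets %.1f%% of requests\n", name, 100*float64(w)/float64(sum))
  	}
  }

A weight of 0 removes a backend from rotation entirely; if every weight is 0 the route serves a standard 503.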
When the backend has more than +// one endpoint the requests that end up on the backend are roundrobin distributed among +// the endpoints. Weights are between 0 and 256 with default 100. Weight 0 causes no requests +// to the backend. If all weights are zero the route will be considered to have no backends +// and return a standard 503 response. +// +// The `tls` field is optional and allows specific certificates or behavior for the +// route. Routers typically configure a default certificate on a wildcard domain to +// terminate routes without explicit certificates, but custom hostnames usually must +// choose passthrough (send traffic directly to the backend via the TLS Server-Name- +// Indication field) or provide a certificate. +message RouteSpec { + // host is an alias/DNS that points to the service. Optional. + // If not specified a route name will typically be automatically + // chosen. + // Must follow DNS952 subdomain conventions. + // + // +optional + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$` + optional string host = 1; + + // subdomain is a DNS subdomain that is requested within the ingress controller's + // domain (as a subdomain). If host is set this field is ignored. An ingress + // controller may choose to ignore this suggested name, in which case the controller + // will report the assigned name in the status.ingress array or refuse to admit the + // route. If this value is set and the server does not support this field host will + // be populated automatically. Otherwise host is left empty. The field may have + // multiple parts separated by a dot, but not all ingress controllers may honor + // the request. This field may not be changed after creation except by a user with + // the update routes/custom-host permission. + // + // Example: subdomain `frontend` automatically receives the router subdomain + // `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`. + // + // +optional + // +kubebuilder:validation:MaxLength=253 + // +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$` + optional string subdomain = 8; + + // path that the router watches for, to route traffic for to the service. Optional + // + // +optional + // +kubebuilder:validation:Pattern=`^/` + optional string path = 2; + + // to is an object the route should use as the primary backend. Only the Service kind + // is allowed, and it will be defaulted to Service. If the weight field (0-256 default 100) + // is set to zero, no traffic will be sent to this backend. + optional RouteTargetReference to = 3; + + // alternateBackends allows up to 3 additional backends to be assigned to the route. + // Only the Service kind is allowed, and it will be defaulted to Service. + // Use the weight field in RouteTargetReference object to specify relative preference. + // + // +kubebuilder:validation:MaxItems=3 + repeated RouteTargetReference alternateBackends = 4; + + // If specified, the port to be used by the router. Most routers will use all + // endpoints exposed by the service by default - set this value to instruct routers + // which port to use. + optional RoutePort port = 5; + + // The tls field provides the ability to configure certificates and termination for the route. + optional TLSConfig tls = 6; + + // Wildcard policy if any for the route. 
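To make the field descriptions above concrete, a minimal sketch assembling a RouteSpec with this vendored package's types (the host and service names are hypothetical):

  package main

  import (
  	"fmt"

  	routev1 "github.com/openshift/api/route/v1"
  	"k8s.io/apimachinery/pkg/util/intstr"
  )

  func int32Ptr(i int32) *int32 { return &i }

  func main() {
  	spec := routev1.RouteSpec{
  		Host: "frontend.apps.example.com",
  		Path: "/api",
  		// Primary backend; kind defaults to Service.
  		To: routev1.RouteTargetReference{Kind: "Service", Name: "frontend", Weight: int32Ptr(100)},
  		// Up to three alternates, weighted relative to the primary.
  		AlternateBackends: []routev1.RouteTargetReference{
  			{Kind: "Service", Name: "frontend-canary", Weight: int32Ptr(50)},
  		},
  		// Named port, resolved against the service's endpoint port list.
  		Port:           &routev1.RoutePort{TargetPort: intstr.FromString("https")},
  		WildcardPolicy: routev1.WildcardPolicyNone,
  	}
  	fmt.Printf("%+v\n", spec)
  }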
+ // Currently only 'Subdomain' or 'None' is allowed. + // + // +kubebuilder:validation:Enum=None;Subdomain;"" + // +kubebuilder:default=None + optional string wildcardPolicy = 7; + + // httpHeaders defines policy for HTTP headers. + // + // +optional + optional RouteHTTPHeaders httpHeaders = 9; +} + +// RouteStatus provides relevant info about the status of a route, including which routers +// acknowledge it. +message RouteStatus { + // ingress describes the places where the route may be exposed. The list of + // ingress points may contain duplicate Host or RouterName values. Routes + // are considered live once they are `Ready` + repeated RouteIngress ingress = 1; +} + +// RouteTargetReference specifies the target that resolve into endpoints. Only the 'Service' +// kind is allowed. Use 'weight' field to emphasize one over others. +message RouteTargetReference { + // The kind of target that the route is referring to. Currently, only 'Service' is allowed + // + // +kubebuilder:validation:Enum=Service;"" + // +kubebuilder:default=Service + optional string kind = 1; + + // name of the service/target that is being referred to. e.g. name of the service + // + // +kubebuilder:validation:MinLength=1 + optional string name = 2; + + // weight as an integer between 0 and 256, default 100, that specifies the target's relative weight + // against other target reference objects. 0 suppresses requests to this backend. + // + // +optional + // +kubebuilder:validation:Minimum=0 + // +kubebuilder:validation:Maximum=256 + // +kubebuilder:default=100 + optional int32 weight = 3; +} + +// RouterShard has information of a routing shard and is used to +// generate host names and routing table entries when a routing shard is +// allocated for a specific route. +// Caveat: This is WIP and will likely undergo modifications when sharding +// support is added. +message RouterShard { + // shardName uniquely identifies a router shard in the "set" of + // routers used for routing traffic to the services. + optional string shardName = 1; + + // dnsSuffix for the shard ala: shard-1.v3.openshift.com + optional string dnsSuffix = 2; +} + +// TLSConfig defines config used to secure a route and provide termination +// +// +kubebuilder:validation:XValidation:rule="has(self.termination) && has(self.insecureEdgeTerminationPolicy) ? !((self.termination=='passthrough') && (self.insecureEdgeTerminationPolicy=='Allow')) : true", message="cannot have both spec.tls.termination: passthrough and spec.tls.insecureEdgeTerminationPolicy: Allow" +// +openshift:validation:FeatureSetAwareXValidation:featureSet=TechPreviewNoUpgrade;CustomNoUpgrade,rule="!(has(self.certificate) && has(self.externalCertificate))", message="cannot have both spec.tls.certificate and spec.tls.externalCertificate" +message TLSConfig { + // termination indicates termination type. + // + // * edge - TLS termination is done by the router and http is used to communicate with the backend (default) + // * passthrough - Traffic is sent straight to the destination without the router providing TLS termination + // * reencrypt - TLS termination is done by the router and https is used to communicate with the backend + // + // Note: passthrough termination is incompatible with httpHeader actions + // +kubebuilder:validation:Enum=edge;reencrypt;passthrough + optional string termination = 1; + + // certificate provides certificate contents. This should be a single serving certificate, not a certificate + // chain. Do not include a CA certificate. 
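A short sketch contrasting the two most common termination shapes, assuming the constant names this package defines (TLSTerminationEdge, TLSTerminationPassthrough, InsecureEdgeTerminationPolicyRedirect):

  package main

  import (
  	"fmt"

  	routev1 "github.com/openshift/api/route/v1"
  )

  func main() {
  	// Edge: the router terminates TLS and redirects insecure (port 80)
  	// traffic to the secure port.
  	edge := &routev1.TLSConfig{
  		Termination:                   routev1.TLSTerminationEdge,
  		InsecureEdgeTerminationPolicy: routev1.InsecureEdgeTerminationPolicyRedirect,
  	}

  	// Passthrough: TLS flows straight to the backend, so the CEL rules
  	// above forbid combining it with insecureEdgeTerminationPolicy:
  	// Allow, and the Route-level rule forbids spec.httpHeaders actions.
  	passthrough := &routev1.TLSConfig{
  		Termination: routev1.TLSTerminationPassthrough,
  	}

  	fmt.Println(edge.Termination, passthrough.Termination)
  }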
+ optional string certificate = 2; + + // key provides key file contents + optional string key = 3; + + // caCertificate provides the cert authority certificate contents + optional string caCertificate = 4; + + // destinationCACertificate provides the contents of the ca certificate of the final destination. When using reencrypt + // termination this file should be provided in order to have routers use it for health checks on the secure connection. + // If this field is not specified, the router may provide its own destination CA and perform hostname validation using + // the short service name (service.namespace.svc), which allows infrastructure generated certificates to automatically + // verify. + optional string destinationCACertificate = 5; + + // insecureEdgeTerminationPolicy indicates the desired behavior for insecure connections to a route. While + // each router may make its own decisions on which ports to expose, this is normally port 80. + // + // * Allow - traffic is sent to the server on the insecure port (edge/reencrypt terminations only) (default). + // * None - no traffic is allowed on the insecure port. + // * Redirect - clients are redirected to the secure port. + // + // +kubebuilder:validation:Enum=Allow;None;Redirect;"" + optional string insecureEdgeTerminationPolicy = 6; + + // externalCertificate provides certificate contents as a secret reference. + // This should be a single serving certificate, not a certificate + // chain. Do not include a CA certificate. The secret referenced should + // be present in the same namespace as that of the Route. + // Forbidden when `certificate` is set. + // + // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + // +optional + optional LocalObjectReference externalCertificate = 7; +} + diff --git a/vendor/github.com/openshift/api/route/v1/legacy.go b/vendor/github.com/openshift/api/route/v1/legacy.go new file mode 100644 index 000000000..498f5dd0f --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/legacy.go @@ -0,0 +1,22 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &Route{}, + &RouteList{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) 
+ return nil +} diff --git a/vendor/github.com/openshift/api/route/v1/register.go b/vendor/github.com/openshift/api/route/v1/register.go new file mode 100644 index 000000000..6f99ef5c9 --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/register.go @@ -0,0 +1,39 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "route.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &Route{}, + &RouteList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/route/v1/route-CustomNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/route/v1/route-CustomNoUpgrade.crd.yaml new file mode 100644 index 000000000..13461f666 --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/route-CustomNoUpgrade.crd.yaml @@ -0,0 +1,364 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1228 + release.openshift.io/feature-set: CustomNoUpgrade + name: routes.route.openshift.io +spec: + group: route.openshift.io + names: + kind: Route + plural: routes + singular: route + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.ingress[0].host + name: Host + type: string + - jsonPath: .status.ingress[0].conditions[?(@.type=="Admitted")].status + name: Admitted + type: string + - jsonPath: .spec.to.name + name: Service + type: string + - jsonPath: .spec.tls.type + name: TLS + type: string + name: v1 + schema: + openAPIV3Schema: + description: "A route allows developers to expose services through an HTTP(S) aware load balancing and proxy layer via a public DNS entry. The route may further specify TLS options and a certificate, or specify a public CNAME that the router should also accept for HTTP and HTTPS traffic. An administrator typically configures their router to be visible outside the cluster firewall, and may also add additional security, caching, or traffic controls on the service content. Routers usually talk directly to the service endpoints. \n Once a route is created, the `host` field may not be changed. Generally, routers use the oldest route with a given host when resolving conflicts. \n Routers are subject to additional customization and may support additional controls via the annotations field. \n Because administrators may configure multiple routers, the route status field is used to return information to clients about the names and states of the route under each router. 
If a client chooses a duplicate name, for instance, the route status conditions are used to indicate the route cannot be chosen. \n To enable HTTP/2 ALPN on a route it requires a custom (non-wildcard) certificate. This prevents connection coalescing by clients, notably web browsers. We do not support HTTP/2 ALPN on routes that use the default certificate because of the risk of connection re-use/coalescing. Routes that do not have their own custom certificate will not be HTTP/2 ALPN-enabled on either the frontend or the backend. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the desired state of the route + type: object + required: + - to + properties: + alternateBackends: + description: alternateBackends allows up to 3 additional backends to be assigned to the route. Only the Service kind is allowed, and it will be defaulted to Service. Use the weight field in RouteTargetReference object to specify relative preference. + type: array + maxItems: 3 + items: + description: RouteTargetReference specifies the target that resolve into endpoints. Only the 'Service' kind is allowed. Use 'weight' field to emphasize one over others. + type: object + required: + - kind + - name + properties: + kind: + description: The kind of target that the route is referring to. Currently, only 'Service' is allowed + type: string + default: Service + enum: + - Service + - "" + name: + description: name of the service/target that is being referred to. e.g. name of the service + type: string + minLength: 1 + weight: + description: weight as an integer between 0 and 256, default 100, that specifies the target's relative weight against other target reference objects. 0 suppresses requests to this backend. + type: integer + format: int32 + default: 100 + maximum: 256 + minimum: 0 + host: + description: host is an alias/DNS that points to the service. Optional. If not specified a route name will typically be automatically chosen. Must follow DNS952 subdomain conventions. + type: string + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + httpHeaders: + description: httpHeaders defines policy for HTTP headers. + type: object + properties: + actions: + description: 'actions specifies options for modifying headers and their values. Note that this option only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). Headers cannot be modified for TLS passthrough connections. Setting the HSTS (`Strict-Transport-Security`) header is not supported via actions. 
`Strict-Transport-Security` may only be configured using the "haproxy.router.openshift.io/hsts_header" route annotation, and only in accordance with the policy specified in Ingress.Spec.RequiredHSTSPolicies. In case of HTTP request headers, the actions specified in spec.httpHeaders.actions on the Route will be executed after the actions specified in the IngressController''s spec.httpHeaders.actions field. In case of HTTP response headers, the actions specified in spec.httpHeaders.actions on the IngressController will be executed after the actions specified in the Route''s spec.httpHeaders.actions field. The headers set via this API will not appear in access logs. Any actions defined here are applied after any actions related to the following other fields: cache-control, spec.clientTLS, spec.httpHeaders.forwardedHeaderPolicy, spec.httpHeaders.uniqueId, and spec.httpHeaders.headerNameCaseAdjustments. The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Cookie, Set-Cookie. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. Please refer to the documentation for that API field for more details.' + type: object + properties: + request: + description: 'request is a list of HTTP request headers to modify. Currently, actions may define to either `Set` or `Delete` headers values. Actions defined here will modify the request headers of all requests made through a route. These actions are applied to a specific Route defined within a cluster i.e. connections made through a route. Currently, actions may define to either `Set` or `Delete` headers values. Route actions will be executed after IngressController actions for request headers. Actions are applied in sequence as defined in this list. A maximum of 20 request header actions may be configured. You can use this field to specify HTTP request headers that should be set or deleted when forwarding connections from the client to your application. Sample fetchers allowed are "req.hdr" and "ssl_c_der". Converters allowed are "lower" and "base64". Example header values: "%[req.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]". Any request header configuration applied directly via a Route resource using this API will override header configuration for a header of the same name applied via spec.httpHeaders.actions on the IngressController or route annotation. Note: This field cannot be used if your route uses TLS passthrough.' + type: array + maxItems: 20 + items: + description: RouteHTTPHeader specifies configuration for setting or deleting an HTTP header. + type: object + required: + - action + - name + properties: + action: + description: action specifies actions to perform on headers, such as setting or deleting headers. + type: object + required: + - type + properties: + set: + description: 'set defines the HTTP header that should be set: added if it doesn''t exist or replaced if it does. This field is required when type is Set and forbidden otherwise.' + type: object + required: + - value + properties: + value: + description: value specifies a header value. Dynamic values can be added. 
The value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. The value of this field must be no more than 16384 characters in length. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. + type: string + maxLength: 16384 + minLength: 1 + type: + description: type defines the type of the action to be applied on the header. Possible values are Set or Delete. Set allows you to set HTTP request and response headers. Delete allows you to delete HTTP request and response headers. + type: string + enum: + - Set + - Delete + x-kubernetes-validations: + - rule: 'has(self.type) && self.type == ''Set'' ? has(self.set) : !has(self.set)' + message: set is required when type is Set, and forbidden otherwise + name: + description: 'name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. The name must consist only of alphanumeric and the following special characters, "-!#$%&''*+.^_`". The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Cookie, Set-Cookie. It must be no more than 255 characters in length. Header name must be unique.' + type: string + maxLength: 255 + minLength: 1 + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + x-kubernetes-validations: + - rule: self.lowerAscii() != 'strict-transport-security' + message: strict-transport-security header may not be modified via header actions + - rule: self.lowerAscii() != 'proxy' + message: proxy header may not be modified via header actions + - rule: self.lowerAscii() != 'cookie' + message: cookie header may not be modified via header actions + - rule: self.lowerAscii() != 'set-cookie' + message: set-cookie header may not be modified via header actions + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + x-kubernetes-validations: + - rule: self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:req\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$'))) + message: Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are req.hdr, ssl_c_der. Converters allowed are lower, base64. + response: + description: 'response is a list of HTTP response headers to modify. Currently, actions may define to either `Set` or `Delete` headers values. Actions defined here will modify the response headers of all requests made through a route. These actions are applied to a specific Route defined within a cluster i.e. connections made through a route. Route actions will be executed before IngressController actions for response headers. Actions are applied in sequence as defined in this list. A maximum of 20 response header actions may be configured. 
You can use this field to specify HTTP response headers that should be set or deleted when forwarding responses from your application to the client. Sample fetchers allowed are "res.hdr" and "ssl_c_der". Converters allowed are "lower" and "base64". Example header values: "%[res.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]". Note: This field cannot be used if your route uses TLS passthrough.' + type: array + maxItems: 20 + items: + description: RouteHTTPHeader specifies configuration for setting or deleting an HTTP header. + type: object + required: + - action + - name + properties: + action: + description: action specifies actions to perform on headers, such as setting or deleting headers. + type: object + required: + - type + properties: + set: + description: 'set defines the HTTP header that should be set: added if it doesn''t exist or replaced if it does. This field is required when type is Set and forbidden otherwise.' + type: object + required: + - value + properties: + value: + description: value specifies a header value. Dynamic values can be added. The value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. The value of this field must be no more than 16384 characters in length. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. + type: string + maxLength: 16384 + minLength: 1 + type: + description: type defines the type of the action to be applied on the header. Possible values are Set or Delete. Set allows you to set HTTP request and response headers. Delete allows you to delete HTTP request and response headers. + type: string + enum: + - Set + - Delete + x-kubernetes-validations: + - rule: 'has(self.type) && self.type == ''Set'' ? has(self.set) : !has(self.set)' + message: set is required when type is Set, and forbidden otherwise + name: + description: 'name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. The name must consist only of alphanumeric and the following special characters, "-!#$%&''*+.^_`". The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Cookie, Set-Cookie. It must be no more than 255 characters in length. Header name must be unique.' 
+ type: string + maxLength: 255 + minLength: 1 + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + x-kubernetes-validations: + - rule: self.lowerAscii() != 'strict-transport-security' + message: strict-transport-security header may not be modified via header actions + - rule: self.lowerAscii() != 'proxy' + message: proxy header may not be modified via header actions + - rule: self.lowerAscii() != 'cookie' + message: cookie header may not be modified via header actions + - rule: self.lowerAscii() != 'set-cookie' + message: set-cookie header may not be modified via header actions + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + x-kubernetes-validations: + - rule: self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:res\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$'))) + message: Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are res.hdr, ssl_c_der. Converters allowed are lower, base64. + path: + description: path that the router watches for, to route traffic to the service. Optional + type: string + pattern: ^/ + port: + description: If specified, the port to be used by the router. Most routers will use all endpoints exposed by the service by default - set this value to instruct routers which port to use. + type: object + required: + - targetPort + properties: + targetPort: + description: The target port on pods selected by the service this route points to. If this is a string, it will be looked up as a named port in the target endpoints port list. Required + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + subdomain: + description: "subdomain is a DNS subdomain that is requested within the ingress controller's domain (as a subdomain). If host is set this field is ignored. An ingress controller may choose to ignore this suggested name, in which case the controller will report the assigned name in the status.ingress array or refuse to admit the route. If this value is set and the server does not support this field host will be populated automatically. Otherwise host is left empty. The field may have multiple parts separated by a dot, but not all ingress controllers may honor the request. This field may not be changed after creation except by a user with the update routes/custom-host permission. \n Example: subdomain `frontend` automatically receives the router subdomain `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`." + type: string + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + tls: + description: The tls field provides the ability to configure certificates and termination for the route. + type: object + required: + - termination + properties: + caCertificate: + description: caCertificate provides the cert authority certificate contents + type: string + certificate: + description: certificate provides certificate contents. This should be a single serving certificate, not a certificate chain.
Do not include a CA certificate. + type: string + destinationCACertificate: + description: destinationCACertificate provides the contents of the ca certificate of the final destination. When using reencrypt termination this file should be provided in order to have routers use it for health checks on the secure connection. If this field is not specified, the router may provide its own destination CA and perform hostname validation using the short service name (service.namespace.svc), which allows infrastructure generated certificates to automatically verify. + type: string + externalCertificate: + description: externalCertificate provides certificate contents as a secret reference. This should be a single serving certificate, not a certificate chain. Do not include a CA certificate. The secret referenced should be present in the same namespace as that of the Route. Forbidden when `certificate` is set. + type: object + properties: + name: + description: 'name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + x-kubernetes-map-type: atomic + insecureEdgeTerminationPolicy: + description: "insecureEdgeTerminationPolicy indicates the desired behavior for insecure connections to a route. While each router may make its own decisions on which ports to expose, this is normally port 80. \n * Allow - traffic is sent to the server on the insecure port (edge/reencrypt terminations only) (default). * None - no traffic is allowed on the insecure port. * Redirect - clients are redirected to the secure port." + type: string + enum: + - Allow + - None + - Redirect + - "" + key: + description: key provides key file contents + type: string + termination: + description: "termination indicates termination type. \n * edge - TLS termination is done by the router and http is used to communicate with the backend (default) * passthrough - Traffic is sent straight to the destination without the router providing TLS termination * reencrypt - TLS termination is done by the router and https is used to communicate with the backend \n Note: passthrough termination is incompatible with httpHeader actions" + type: string + enum: + - edge + - reencrypt + - passthrough + x-kubernetes-validations: + - rule: '!(has(self.certificate) && has(self.externalCertificate))' + message: cannot have both spec.tls.certificate and spec.tls.externalCertificate + - rule: 'has(self.termination) && has(self.insecureEdgeTerminationPolicy) ? !((self.termination==''passthrough'') && (self.insecureEdgeTerminationPolicy==''Allow'')) : true' + message: 'cannot have both spec.tls.termination: passthrough and spec.tls.insecureEdgeTerminationPolicy: Allow' + to: + description: to is an object the route should use as the primary backend. Only the Service kind is allowed, and it will be defaulted to Service. If the weight field (0-256 default 100) is set to zero, no traffic will be sent to this backend. + type: object + required: + - kind + - name + properties: + kind: + description: The kind of target that the route is referring to. Currently, only 'Service' is allowed + type: string + default: Service + enum: + - Service + - "" + name: + description: name of the service/target that is being referred to. e.g. name of the service + type: string + minLength: 1 + weight: + description: weight as an integer between 0 and 256, default 100, that specifies the target's relative weight against other target reference objects. 0 suppresses requests to this backend. 
+ type: integer + format: int32 + default: 100 + maximum: 256 + minimum: 0 + wildcardPolicy: + description: Wildcard policy if any for the route. Currently only 'Subdomain' or 'None' is allowed. + type: string + default: None + enum: + - None + - Subdomain + - "" + x-kubernetes-validations: + - rule: '!has(self.tls) || self.tls.termination != ''passthrough'' || !has(self.httpHeaders)' + message: header actions are not permitted when tls termination is passthrough. + status: + description: status is the current state of the route + type: object + properties: + ingress: + description: ingress describes the places where the route may be exposed. The list of ingress points may contain duplicate Host or RouterName values. Routes are considered live once they are `Ready` + type: array + items: + description: RouteIngress holds information about the places where a route is exposed. + type: object + properties: + conditions: + description: Conditions is the state of the route, may be empty. + type: array + items: + description: RouteIngressCondition contains details for the current condition of this route on a particular router. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: RFC 3339 date and time when this condition last transitioned + type: string + format: date-time + message: + description: Human readable message indicating details about last transition. + type: string + reason: + description: (brief) reason for the condition's last transition, and is usually a machine and human readable constant + type: string + status: + description: Status is the status of the condition. Can be True, False, Unknown. + type: string + type: + description: Type is the type of the condition. Currently only Admitted. + type: string + host: + description: Host is the host string under which the route is exposed; this value is required + type: string + routerCanonicalHostname: + description: CanonicalHostname is the external host name for the router that can be used as a CNAME for the host requested for this route. This value is optional and may not be set in all cases. + type: string + routerName: + description: Name is a name chosen by the router to identify itself; this value is required + type: string + wildcardPolicy: + description: Wildcard policy is the wildcard policy that was allowed where this route is exposed. 
+ type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/route/v1/route-TechPreviewNoUpgrade.crd.yaml b/vendor/github.com/openshift/api/route/v1/route-TechPreviewNoUpgrade.crd.yaml new file mode 100644 index 000000000..87b617cac --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/route-TechPreviewNoUpgrade.crd.yaml @@ -0,0 +1,364 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1228 + release.openshift.io/feature-set: TechPreviewNoUpgrade + name: routes.route.openshift.io +spec: + group: route.openshift.io + names: + kind: Route + plural: routes + singular: route + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.ingress[0].host + name: Host + type: string + - jsonPath: .status.ingress[0].conditions[?(@.type=="Admitted")].status + name: Admitted + type: string + - jsonPath: .spec.to.name + name: Service + type: string + - jsonPath: .spec.tls.type + name: TLS + type: string + name: v1 + schema: + openAPIV3Schema: + description: "A route allows developers to expose services through an HTTP(S) aware load balancing and proxy layer via a public DNS entry. The route may further specify TLS options and a certificate, or specify a public CNAME that the router should also accept for HTTP and HTTPS traffic. An administrator typically configures their router to be visible outside the cluster firewall, and may also add additional security, caching, or traffic controls on the service content. Routers usually talk directly to the service endpoints. \n Once a route is created, the `host` field may not be changed. Generally, routers use the oldest route with a given host when resolving conflicts. \n Routers are subject to additional customization and may support additional controls via the annotations field. \n Because administrators may configure multiple routers, the route status field is used to return information to clients about the names and states of the route under each router. If a client chooses a duplicate name, for instance, the route status conditions are used to indicate the route cannot be chosen. \n To enable HTTP/2 ALPN on a route it requires a custom (non-wildcard) certificate. This prevents connection coalescing by clients, notably web browsers. We do not support HTTP/2 ALPN on routes that use the default certificate because of the risk of connection re-use/coalescing. Routes that do not have their own custom certificate will not be HTTP/2 ALPN-enabled on either the frontend or the backend. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the desired state of the route + type: object + required: + - to + properties: + alternateBackends: + description: alternateBackends allows up to 3 additional backends to be assigned to the route. Only the Service kind is allowed, and it will be defaulted to Service. Use the weight field in RouteTargetReference object to specify relative preference. + type: array + maxItems: 3 + items: + description: RouteTargetReference specifies the target that resolves into endpoints. Only the 'Service' kind is allowed. Use 'weight' field to emphasize one over others. + type: object + required: + - kind + - name + properties: + kind: + description: The kind of target that the route is referring to. Currently, only 'Service' is allowed + type: string + default: Service + enum: + - Service + - "" + name: + description: name of the service/target that is being referred to. e.g. name of the service + type: string + minLength: 1 + weight: + description: weight as an integer between 0 and 256, default 100, that specifies the target's relative weight against other target reference objects. 0 suppresses requests to this backend. + type: integer + format: int32 + default: 100 + maximum: 256 + minimum: 0 + host: + description: host is an alias/DNS that points to the service. Optional. If not specified a route name will typically be automatically chosen. Must follow DNS952 subdomain conventions. + type: string + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + httpHeaders: + description: httpHeaders defines policy for HTTP headers. + type: object + properties: + actions: + description: 'actions specifies options for modifying headers and their values. Note that this option only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). Headers cannot be modified for TLS passthrough connections. Setting the HSTS (`Strict-Transport-Security`) header is not supported via actions. `Strict-Transport-Security` may only be configured using the "haproxy.router.openshift.io/hsts_header" route annotation, and only in accordance with the policy specified in Ingress.Spec.RequiredHSTSPolicies. In case of HTTP request headers, the actions specified in spec.httpHeaders.actions on the Route will be executed after the actions specified in the IngressController''s spec.httpHeaders.actions field. In case of HTTP response headers, the actions specified in spec.httpHeaders.actions on the IngressController will be executed after the actions specified in the Route''s spec.httpHeaders.actions field. The headers set via this API will not appear in access logs. Any actions defined here are applied after any actions related to the following other fields: cache-control, spec.clientTLS, spec.httpHeaders.forwardedHeaderPolicy, spec.httpHeaders.uniqueId, and spec.httpHeaders.headerNameCaseAdjustments. The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Cookie, Set-Cookie. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController.
Please refer to the documentation for that API field for more details.' + type: object + properties: + request: + description: 'request is a list of HTTP request headers to modify. Currently, actions may define to either `Set` or `Delete` headers values. Actions defined here will modify the request headers of all requests made through a route. These actions are applied to a specific Route defined within a cluster i.e. connections made through a route. Route actions will be executed after IngressController actions for request headers. Actions are applied in sequence as defined in this list. A maximum of 20 request header actions may be configured. You can use this field to specify HTTP request headers that should be set or deleted when forwarding connections from the client to your application. Sample fetchers allowed are "req.hdr" and "ssl_c_der". Converters allowed are "lower" and "base64". Example header values: "%[req.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]". Any request header configuration applied directly via a Route resource using this API will override header configuration for a header of the same name applied via spec.httpHeaders.actions on the IngressController or route annotation. Note: This field cannot be used if your route uses TLS passthrough.' + type: array + maxItems: 20 + items: + description: RouteHTTPHeader specifies configuration for setting or deleting an HTTP header. + type: object + required: + - action + - name + properties: + action: + description: action specifies actions to perform on headers, such as setting or deleting headers. + type: object + required: + - type + properties: + set: + description: 'set defines the HTTP header that should be set: added if it doesn''t exist or replaced if it does. This field is required when type is Set and forbidden otherwise.' + type: object + required: + - value + properties: + value: + description: value specifies a header value. Dynamic values can be added. The value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. The value of this field must be no more than 16384 characters in length. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. + type: string + maxLength: 16384 + minLength: 1 + type: + description: type defines the type of the action to be applied on the header. Possible values are Set or Delete. Set allows you to set HTTP request and response headers. Delete allows you to delete HTTP request and response headers. + type: string + enum: + - Set + - Delete + x-kubernetes-validations: + - rule: 'has(self.type) && self.type == ''Set'' ? has(self.set) : !has(self.set)' + message: set is required when type is Set, and forbidden otherwise + name: + description: 'name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. The name must consist only of alphanumeric and the following special characters, "-!#$%&''*+.^_`". The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Cookie, Set-Cookie.
It must be no more than 255 characters in length. Header name must be unique.' + type: string + maxLength: 255 + minLength: 1 + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + x-kubernetes-validations: + - rule: self.lowerAscii() != 'strict-transport-security' + message: strict-transport-security header may not be modified via header actions + - rule: self.lowerAscii() != 'proxy' + message: proxy header may not be modified via header actions + - rule: self.lowerAscii() != 'cookie' + message: cookie header may not be modified via header actions + - rule: self.lowerAscii() != 'set-cookie' + message: set-cookie header may not be modified via header actions + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + x-kubernetes-validations: + - rule: self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:req\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$'))) + message: Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are req.hdr, ssl_c_der. Converters allowed are lower, base64. + response: + description: 'response is a list of HTTP response headers to modify. Currently, actions may define to either `Set` or `Delete` headers values. Actions defined here will modify the response headers of all requests made through a route. These actions are applied to a specific Route defined within a cluster i.e. connections made through a route. Route actions will be executed before IngressController actions for response headers. Actions are applied in sequence as defined in this list. A maximum of 20 response header actions may be configured. You can use this field to specify HTTP response headers that should be set or deleted when forwarding responses from your application to the client. Sample fetchers allowed are "res.hdr" and "ssl_c_der". Converters allowed are "lower" and "base64". Example header values: "%[res.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]". Note: This field cannot be used if your route uses TLS passthrough.' + type: array + maxItems: 20 + items: + description: RouteHTTPHeader specifies configuration for setting or deleting an HTTP header. + type: object + required: + - action + - name + properties: + action: + description: action specifies actions to perform on headers, such as setting or deleting headers. + type: object + required: + - type + properties: + set: + description: 'set defines the HTTP header that should be set: added if it doesn''t exist or replaced if it does. This field is required when type is Set and forbidden otherwise.' + type: object + required: + - value + properties: + value: + description: value specifies a header value. Dynamic values can be added. The value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. The value of this field must be no more than 16384 characters in length. 
Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. + type: string + maxLength: 16384 + minLength: 1 + type: + description: type defines the type of the action to be applied on the header. Possible values are Set or Delete. Set allows you to set HTTP request and response headers. Delete allows you to delete HTTP request and response headers. + type: string + enum: + - Set + - Delete + x-kubernetes-validations: + - rule: 'has(self.type) && self.type == ''Set'' ? has(self.set) : !has(self.set)' + message: set is required when type is Set, and forbidden otherwise + name: + description: 'name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. The name must consist only of alphanumeric and the following special characters, "-!#$%&''*+.^_`". The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Cookie, Set-Cookie. It must be no more than 255 characters in length. Header name must be unique.' + type: string + maxLength: 255 + minLength: 1 + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + x-kubernetes-validations: + - rule: self.lowerAscii() != 'strict-transport-security' + message: strict-transport-security header may not be modified via header actions + - rule: self.lowerAscii() != 'proxy' + message: proxy header may not be modified via header actions + - rule: self.lowerAscii() != 'cookie' + message: cookie header may not be modified via header actions + - rule: self.lowerAscii() != 'set-cookie' + message: set-cookie header may not be modified via header actions + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + x-kubernetes-validations: + - rule: self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:res\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$'))) + message: Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are res.hdr, ssl_c_der. Converters allowed are lower, base64. + path: + description: path that the router watches for, to route traffic to the service. Optional + type: string + pattern: ^/ + port: + description: If specified, the port to be used by the router. Most routers will use all endpoints exposed by the service by default - set this value to instruct routers which port to use. + type: object + required: + - targetPort + properties: + targetPort: + description: The target port on pods selected by the service this route points to. If this is a string, it will be looked up as a named port in the target endpoints port list. Required + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + subdomain: + description: "subdomain is a DNS subdomain that is requested within the ingress controller's domain (as a subdomain). If host is set this field is ignored.
An ingress controller may choose to ignore this suggested name, in which case the controller will report the assigned name in the status.ingress array or refuse to admit the route. If this value is set and the server does not support this field host will be populated automatically. Otherwise host is left empty. The field may have multiple parts separated by a dot, but not all ingress controllers may honor the request. This field may not be changed after creation except by a user with the update routes/custom-host permission. \n Example: subdomain `frontend` automatically receives the router subdomain `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`." + type: string + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + tls: + description: The tls field provides the ability to configure certificates and termination for the route. + type: object + required: + - termination + properties: + caCertificate: + description: caCertificate provides the cert authority certificate contents + type: string + certificate: + description: certificate provides certificate contents. This should be a single serving certificate, not a certificate chain. Do not include a CA certificate. + type: string + destinationCACertificate: + description: destinationCACertificate provides the contents of the ca certificate of the final destination. When using reencrypt termination this file should be provided in order to have routers use it for health checks on the secure connection. If this field is not specified, the router may provide its own destination CA and perform hostname validation using the short service name (service.namespace.svc), which allows infrastructure generated certificates to automatically verify. + type: string + externalCertificate: + description: externalCertificate provides certificate contents as a secret reference. This should be a single serving certificate, not a certificate chain. Do not include a CA certificate. The secret referenced should be present in the same namespace as that of the Route. Forbidden when `certificate` is set. + type: object + properties: + name: + description: 'name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + x-kubernetes-map-type: atomic + insecureEdgeTerminationPolicy: + description: "insecureEdgeTerminationPolicy indicates the desired behavior for insecure connections to a route. While each router may make its own decisions on which ports to expose, this is normally port 80. \n * Allow - traffic is sent to the server on the insecure port (edge/reencrypt terminations only) (default). * None - no traffic is allowed on the insecure port. * Redirect - clients are redirected to the secure port." + type: string + enum: + - Allow + - None + - Redirect + - "" + key: + description: key provides key file contents + type: string + termination: + description: "termination indicates termination type. 
\n * edge - TLS termination is done by the router and http is used to communicate with the backend (default) * passthrough - Traffic is sent straight to the destination without the router providing TLS termination * reencrypt - TLS termination is done by the router and https is used to communicate with the backend \n Note: passthrough termination is incompatible with httpHeader actions" + type: string + enum: + - edge + - reencrypt + - passthrough + x-kubernetes-validations: + - rule: '!(has(self.certificate) && has(self.externalCertificate))' + message: cannot have both spec.tls.certificate and spec.tls.externalCertificate + - rule: 'has(self.termination) && has(self.insecureEdgeTerminationPolicy) ? !((self.termination==''passthrough'') && (self.insecureEdgeTerminationPolicy==''Allow'')) : true' + message: 'cannot have both spec.tls.termination: passthrough and spec.tls.insecureEdgeTerminationPolicy: Allow' + to: + description: to is an object the route should use as the primary backend. Only the Service kind is allowed, and it will be defaulted to Service. If the weight field (0-256 default 100) is set to zero, no traffic will be sent to this backend. + type: object + required: + - kind + - name + properties: + kind: + description: The kind of target that the route is referring to. Currently, only 'Service' is allowed + type: string + default: Service + enum: + - Service + - "" + name: + description: name of the service/target that is being referred to. e.g. name of the service + type: string + minLength: 1 + weight: + description: weight as an integer between 0 and 256, default 100, that specifies the target's relative weight against other target reference objects. 0 suppresses requests to this backend. + type: integer + format: int32 + default: 100 + maximum: 256 + minimum: 0 + wildcardPolicy: + description: Wildcard policy if any for the route. Currently only 'Subdomain' or 'None' is allowed. + type: string + default: None + enum: + - None + - Subdomain + - "" + x-kubernetes-validations: + - rule: '!has(self.tls) || self.tls.termination != ''passthrough'' || !has(self.httpHeaders)' + message: header actions are not permitted when tls termination is passthrough. + status: + description: status is the current state of the route + type: object + properties: + ingress: + description: ingress describes the places where the route may be exposed. The list of ingress points may contain duplicate Host or RouterName values. Routes are considered live once they are `Ready` + type: array + items: + description: RouteIngress holds information about the places where a route is exposed. + type: object + properties: + conditions: + description: Conditions is the state of the route, may be empty. + type: array + items: + description: RouteIngressCondition contains details for the current condition of this route on a particular router. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: RFC 3339 date and time when this condition last transitioned + type: string + format: date-time + message: + description: Human readable message indicating details about last transition. + type: string + reason: + description: (brief) reason for the condition's last transition, and is usually a machine and human readable constant + type: string + status: + description: Status is the status of the condition. Can be True, False, Unknown. + type: string + type: + description: Type is the type of the condition. Currently only Admitted. 
+ type: string + host: + description: Host is the host string under which the route is exposed; this value is required + type: string + routerCanonicalHostname: + description: CanonicalHostname is the external host name for the router that can be used as a CNAME for the host requested for this route. This value is optional and may not be set in all cases. + type: string + routerName: + description: Name is a name chosen by the router to identify itself; this value is required + type: string + wildcardPolicy: + description: Wildcard policy is the wildcard policy that was allowed where this route is exposed. + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/route/v1/route.crd.yaml b/vendor/github.com/openshift/api/route/v1/route.crd.yaml new file mode 100644 index 000000000..cda46fc33 --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/route.crd.yaml @@ -0,0 +1,407 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/1228 + name: routes.route.openshift.io +spec: + group: route.openshift.io + names: + kind: Route + plural: routes + singular: route + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.ingress[0].host + name: Host + type: string + - jsonPath: .status.ingress[0].conditions[?(@.type=="Admitted")].status + name: Admitted + type: string + - jsonPath: .spec.to.name + name: Service + type: string + - jsonPath: .spec.tls.type + name: TLS + type: string + name: v1 + schema: + openAPIV3Schema: + description: "A route allows developers to expose services through an HTTP(S) aware load balancing and proxy layer via a public DNS entry. The route may further specify TLS options and a certificate, or specify a public CNAME that the router should also accept for HTTP and HTTPS traffic. An administrator typically configures their router to be visible outside the cluster firewall, and may also add additional security, caching, or traffic controls on the service content. Routers usually talk directly to the service endpoints. \n Once a route is created, the `host` field may not be changed. Generally, routers use the oldest route with a given host when resolving conflicts. \n Routers are subject to additional customization and may support additional controls via the annotations field. \n Because administrators may configure multiple routers, the route status field is used to return information to clients about the names and states of the route under each router. If a client chooses a duplicate name, for instance, the route status conditions are used to indicate the route cannot be chosen. \n To enable HTTP/2 ALPN on a route it requires a custom (non-wildcard) certificate. This prevents connection coalescing by clients, notably web browsers. We do not support HTTP/2 ALPN on routes that use the default certificate because of the risk of connection re-use/coalescing. Routes that do not have their own custom certificate will not be HTTP/2 ALPN-enabled on either the frontend or the backend. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + allOf: + - anyOf: + - properties: + path: + maxLength: 0 + - properties: + tls: + enum: + - null + - not: + properties: + tls: + properties: + termination: + enum: + - passthrough + - anyOf: + - not: + properties: + host: + maxLength: 0 + - not: + properties: + wildcardPolicy: + enum: + - Subdomain + description: spec is the desired state of the route + properties: + alternateBackends: + description: alternateBackends allows up to 3 additional backends to be assigned to the route. Only the Service kind is allowed, and it will be defaulted to Service. Use the weight field in RouteTargetReference object to specify relative preference. + items: + description: RouteTargetReference specifies the target that resolves into endpoints. Only the 'Service' kind is allowed. Use 'weight' field to emphasize one over others. + properties: + kind: + default: Service + description: The kind of target that the route is referring to. Currently, only 'Service' is allowed + enum: + - Service + - "" + type: string + name: + description: name of the service/target that is being referred to. e.g. name of the service + minLength: 1 + type: string + weight: + default: 100 + description: weight as an integer between 0 and 256, default 100, that specifies the target's relative weight against other target reference objects. 0 suppresses requests to this backend. + format: int32 + maximum: 256 + minimum: 0 + type: integer + required: + - kind + - name + type: object + maxItems: 3 + type: array + host: + description: host is an alias/DNS that points to the service. Optional. If not specified a route name will typically be automatically chosen. Must follow DNS952 subdomain conventions. + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + httpHeaders: + description: httpHeaders defines policy for HTTP headers. + properties: + actions: + description: 'actions specifies options for modifying headers and their values. Note that this option only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). Headers cannot be modified for TLS passthrough connections. Setting the HSTS (`Strict-Transport-Security`) header is not supported via actions. `Strict-Transport-Security` may only be configured using the "haproxy.router.openshift.io/hsts_header" route annotation, and only in accordance with the policy specified in Ingress.Spec.RequiredHSTSPolicies. In case of HTTP request headers, the actions specified in spec.httpHeaders.actions on the Route will be executed after the actions specified in the IngressController''s spec.httpHeaders.actions field. In case of HTTP response headers, the actions specified in spec.httpHeaders.actions on the IngressController will be executed after the actions specified in the Route''s spec.httpHeaders.actions field. The headers set via this API will not appear in access logs.
Any actions defined here are applied after any actions related to the following other fields: cache-control, spec.clientTLS, spec.httpHeaders.forwardedHeaderPolicy, spec.httpHeaders.uniqueId, and spec.httpHeaders.headerNameCaseAdjustments. The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Cookie, Set-Cookie. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. Please refer to the documentation for that API field for more details.' + properties: + request: + description: 'request is a list of HTTP request headers to modify. Currently, actions may define to either `Set` or `Delete` headers values. Actions defined here will modify the request headers of all requests made through a route. These actions are applied to a specific Route defined within a cluster i.e. connections made through a route. Route actions will be executed after IngressController actions for request headers. Actions are applied in sequence as defined in this list. A maximum of 20 request header actions may be configured. You can use this field to specify HTTP request headers that should be set or deleted when forwarding connections from the client to your application. Sample fetchers allowed are "req.hdr" and "ssl_c_der". Converters allowed are "lower" and "base64". Example header values: "%[req.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]". Any request header configuration applied directly via a Route resource using this API will override header configuration for a header of the same name applied via spec.httpHeaders.actions on the IngressController or route annotation. Note: This field cannot be used if your route uses TLS passthrough.' + items: + description: RouteHTTPHeader specifies configuration for setting or deleting an HTTP header. + properties: + action: + description: action specifies actions to perform on headers, such as setting or deleting headers. + properties: + set: + description: 'set defines the HTTP header that should be set: added if it doesn''t exist or replaced if it does. This field is required when type is Set and forbidden otherwise.' + properties: + value: + description: value specifies a header value. Dynamic values can be added. The value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. The value of this field must be no more than 16384 characters in length. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. + maxLength: 16384 + minLength: 1 + type: string + required: + - value + type: object + type: + description: type defines the type of the action to be applied on the header. Possible values are Set or Delete. Set allows you to set HTTP request and response headers. Delete allows you to delete HTTP request and response headers. + enum: + - Set + - Delete + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: set is required when type is Set, and forbidden otherwise + rule: 'has(self.type) && self.type == ''Set'' ?
has(self.set) : !has(self.set)' + name: + description: 'name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. The name must consist only of alphanumeric and the following special characters, "-!#$%&''*+.^_`". The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Cookie, Set-Cookie. It must be no more than 255 characters in length. Header name must be unique.' + maxLength: 255 + minLength: 1 + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + type: string + x-kubernetes-validations: + - message: strict-transport-security header may not be modified via header actions + rule: self.lowerAscii() != 'strict-transport-security' + - message: proxy header may not be modified via header actions + rule: self.lowerAscii() != 'proxy' + - message: cookie header may not be modified via header actions + rule: self.lowerAscii() != 'cookie' + - message: set-cookie header may not be modified via header actions + rule: self.lowerAscii() != 'set-cookie' + required: + - action + - name + type: object + maxItems: 20 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are req.hdr, ssl_c_der. Converters allowed are lower, base64. + rule: self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:req\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$'))) + response: + description: 'response is a list of HTTP response headers to modify. Currently, actions may define to either `Set` or `Delete` headers values. Actions defined here will modify the response headers of all requests made through a route. These actions are applied to a specific Route defined within a cluster i.e. connections made through a route. Route actions will be executed before IngressController actions for response headers. Actions are applied in sequence as defined in this list. A maximum of 20 response header actions may be configured. You can use this field to specify HTTP response headers that should be set or deleted when forwarding responses from your application to the client. Sample fetchers allowed are "res.hdr" and "ssl_c_der". Converters allowed are "lower" and "base64". Example header values: "%[res.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]". Note: This field cannot be used if your route uses TLS passthrough.' + items: + description: RouteHTTPHeader specifies configuration for setting or deleting an HTTP header. + properties: + action: + description: action specifies actions to perform on headers, such as setting or deleting headers. + properties: + set: + description: 'set defines the HTTP header that should be set: added if it doesn''t exist or replaced if it does. This field is required when type is Set and forbidden otherwise.' + properties: + value: + description: value specifies a header value. Dynamic values can be added. 
The value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. The value of this field must be no more than 16384 characters in length. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. + maxLength: 16384 + minLength: 1 + type: string + required: + - value + type: object + type: + description: type defines the type of the action to be applied on the header. Possible values are Set or Delete. Set allows you to set HTTP request and response headers. Delete allows you to delete HTTP request and response headers. + enum: + - Set + - Delete + type: string + required: + - type + type: object + x-kubernetes-validations: + - message: set is required when type is Set, and forbidden otherwise + rule: 'has(self.type) && self.type == ''Set'' ? has(self.set) : !has(self.set)' + name: + description: 'name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. The name must consist only of alphanumeric and the following special characters, "-!#$%&''*+.^_`". The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Cookie, Set-Cookie. It must be no more than 255 characters in length. Header name must be unique.' + maxLength: 255 + minLength: 1 + pattern: ^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$ + type: string + x-kubernetes-validations: + - message: strict-transport-security header may not be modified via header actions + rule: self.lowerAscii() != 'strict-transport-security' + - message: proxy header may not be modified via header actions + rule: self.lowerAscii() != 'proxy' + - message: cookie header may not be modified via header actions + rule: self.lowerAscii() != 'cookie' + - message: set-cookie header may not be modified via header actions + rule: self.lowerAscii() != 'set-cookie' + required: + - action + - name + type: object + maxItems: 20 + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + x-kubernetes-validations: + - message: Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are res.hdr, ssl_c_der. Converters allowed are lower, base64. + rule: self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:res\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$'))) + type: object + type: object + path: + description: path that the router watches for, to route traffic to the service. Optional + pattern: ^/ + type: string + port: + description: If specified, the port to be used by the router. Most routers will use all endpoints exposed by the service by default - set this value to instruct routers which port to use.
+ properties: + targetPort: + allOf: + - not: + enum: + - 0 + - not: + enum: + - "" + x-kubernetes-int-or-string: true + required: + - targetPort + type: object + subdomain: + description: "subdomain is a DNS subdomain that is requested within the ingress controller's domain (as a subdomain). If host is set this field is ignored. An ingress controller may choose to ignore this suggested name, in which case the controller will report the assigned name in the status.ingress array or refuse to admit the route. If this value is set and the server does not support this field host will be populated automatically. Otherwise host is left empty. The field may have multiple parts separated by a dot, but not all ingress controllers may honor the request. This field may not be changed after creation except by a user with the update routes/custom-host permission. \n Example: subdomain `frontend` automatically receives the router subdomain `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`." + maxLength: 253 + pattern: ^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$ + type: string + tls: + allOf: + - anyOf: + - properties: + caCertificate: + maxLength: 0 + certificate: + maxLength: 0 + destinationCACertificate: + maxLength: 0 + key: + maxLength: 0 + - not: + properties: + termination: + enum: + - passthrough + - anyOf: + - properties: + destinationCACertificate: + maxLength: 0 + - not: + properties: + termination: + enum: + - edge + description: The tls field provides the ability to configure certificates and termination for the route. + properties: + caCertificate: + description: caCertificate provides the cert authority certificate contents + type: string + certificate: + description: certificate provides certificate contents. This should be a single serving certificate, not a certificate chain. Do not include a CA certificate. + type: string + destinationCACertificate: + description: destinationCACertificate provides the contents of the ca certificate of the final destination. When using reencrypt termination this file should be provided in order to have routers use it for health checks on the secure connection. If this field is not specified, the router may provide its own destination CA and perform hostname validation using the short service name (service.namespace.svc), which allows infrastructure generated certificates to automatically verify. + type: string + insecureEdgeTerminationPolicy: + description: "insecureEdgeTerminationPolicy indicates the desired behavior for insecure connections to a route. While each router may make its own decisions on which ports to expose, this is normally port 80. \n * Allow - traffic is sent to the server on the insecure port (edge/reencrypt terminations only) (default). * None - no traffic is allowed on the insecure port. * Redirect - clients are redirected to the secure port." + enum: + - Allow + - None + - Redirect + - "" + type: string + key: + description: key provides key file contents + type: string + termination: + description: "termination indicates termination type. 
\n * edge - TLS termination is done by the router and http is used to communicate with the backend (default) * passthrough - Traffic is sent straight to the destination without the router providing TLS termination * reencrypt - TLS termination is done by the router and https is used to communicate with the backend \n Note: passthrough termination is incompatible with httpHeader actions" + enum: + - edge + - reencrypt + - passthrough + type: string + required: + - termination + type: object + x-kubernetes-validations: + - message: 'cannot have both spec.tls.termination: passthrough and spec.tls.insecureEdgeTerminationPolicy: Allow' + rule: 'has(self.termination) && has(self.insecureEdgeTerminationPolicy) ? !((self.termination==''passthrough'') && (self.insecureEdgeTerminationPolicy==''Allow'')) : true' + to: + description: to is an object the route should use as the primary backend. Only the Service kind is allowed, and it will be defaulted to Service. If the weight field (0-256 default 100) is set to zero, no traffic will be sent to this backend. + properties: + kind: + default: Service + description: The kind of target that the route is referring to. Currently, only 'Service' is allowed + enum: + - Service + - "" + type: string + name: + description: name of the service/target that is being referred to. e.g. name of the service + minLength: 1 + type: string + weight: + default: 100 + description: weight as an integer between 0 and 256, default 100, that specifies the target's relative weight against other target reference objects. 0 suppresses requests to this backend. + format: int32 + maximum: 256 + minimum: 0 + type: integer + required: + - kind + - name + type: object + wildcardPolicy: + default: None + description: Wildcard policy if any for the route. Currently only 'Subdomain' or 'None' is allowed. + enum: + - None + - Subdomain + - "" + type: string + required: + - to + type: object + x-kubernetes-validations: + - message: header actions are not permitted when tls termination is passthrough. + rule: '!has(self.tls) || self.tls.termination != ''passthrough'' || !has(self.httpHeaders)' + status: + description: status is the current state of the route + properties: + ingress: + description: ingress describes the places where the route may be exposed. The list of ingress points may contain duplicate Host or RouterName values. Routes are considered live once they are `Ready` + items: + description: RouteIngress holds information about the places where a route is exposed. + properties: + conditions: + description: Conditions is the state of the route, may be empty. + items: + description: RouteIngressCondition contains details for the current condition of this route on a particular router. + properties: + lastTransitionTime: + description: RFC 3339 date and time when this condition last transitioned + format: date-time + type: string + message: + description: Human readable message indicating details about last transition. + type: string + reason: + description: (brief) reason for the condition's last transition, and is usually a machine and human readable constant + type: string + status: + description: Status is the status of the condition. Can be True, False, Unknown. + type: string + type: + description: Type is the type of the condition. Currently only Admitted. 
+ type: string + required: + - status + - type + type: object + type: array + host: + description: Host is the host string under which the route is exposed; this value is required + type: string + routerCanonicalHostname: + description: CanonicalHostname is the external host name for the router that can be used as a CNAME for the host requested for this route. This value is optional and may not be set in all cases. + type: string + routerName: + description: Name is a name chosen by the router to identify itself; this value is required + type: string + wildcardPolicy: + description: Wildcard policy is the wildcard policy that was allowed where this route is exposed. + type: string + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/route/v1/route.crd.yaml-patch b/vendor/github.com/openshift/api/route/v1/route.crd.yaml-patch new file mode 100644 index 000000000..7f09302f3 --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/route.crd.yaml-patch @@ -0,0 +1,67 @@ +- op: add + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/allOf + value: + # spec.path must be empty when using passthrough TLS. + - anyOf: + - properties: + path: + maxLength: 0 + - properties: + tls: + enum: [null] + - not: + properties: + tls: + properties: + termination: + enum: ["passthrough"] + # spec.host must be nonempty for a wildcard route. + - anyOf: + - not: + properties: + host: + maxLength: 0 + - not: + properties: + wildcardPolicy: + enum: ["Subdomain"] +- op: add + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/port/properties/targetPort + value: + # spec.port.targetPort cannot be the integer 0 or the empty string. (Note + # that negative integer values are allowed, as is the string "0".) + allOf: + - not: + enum: [0] + - not: + enum: [""] + x-kubernetes-int-or-string: true +- op: add + path: /spec/versions/name=v1/schema/openAPIV3Schema/properties/spec/properties/tls/allOf + value: + # spec.tls.certificate, spec.tls.key, spec.tls.caCertificate, and + # spec.tls.destinationCACertificate must omitted when using passthrough TLS. + - anyOf: + - properties: + certificate: + maxLength: 0 + key: + maxLength: 0 + caCertificate: + maxLength: 0 + destinationCACertificate: + maxLength: 0 + - not: + properties: + termination: + enum: ["passthrough"] + # spec.tls.destinationCACertificate must be omitted when using edge-terminated + # TLS. 
+ - anyOf: + - properties: + destinationCACertificate: + maxLength: 0 + - not: + properties: + termination: + enum: ["edge"] diff --git a/vendor/github.com/openshift/api/route/v1/stable.route.testsuite.yaml b/vendor/github.com/openshift/api/route/v1/stable.route.testsuite.yaml new file mode 100644 index 000000000..d1e476673 --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/stable.route.testsuite.yaml @@ -0,0 +1,675 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] Route" +crd: route.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal Route + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + spec: + to: + kind: Service + name: foo + expected: | + apiVersion: route.openshift.io/v1 + kind: Route + spec: + to: + kind: Service + name: foo + weight: 100 + wildcardPolicy: None + - name: "cannot have both spec.tls.termination: passthrough and spec.tls.insecureEdgeTerminationPolicy: Allow" + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + spec: + to: + kind: Service + name: foo + tls: + termination: passthrough + insecureEdgeTerminationPolicy: Allow + expectedError: "cannot have both spec.tls.termination: passthrough and spec.tls.insecureEdgeTerminationPolicy: Allow" + - name: "spec.tls.termination: passthrough is compatible with spec.tls.insecureEdgeTerminationPolicy: Redirect" + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + spec: + host: test.foo + to: + kind: Service + name: foo + tls: + termination: passthrough + insecureEdgeTerminationPolicy: Redirect + expected: | + apiVersion: route.openshift.io/v1 + kind: Route + spec: + host: test.foo + to: + kind: Service + name: foo + weight: 100 + tls: + termination: passthrough + insecureEdgeTerminationPolicy: Redirect + wildcardPolicy: None + - name: "spec.tls.termination: passthrough is compatible with spec.tls.insecureEdgeTerminationPolicy: None" + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + spec: + host: test.foo + to: + kind: Service + name: foo + tls: + termination: passthrough + insecureEdgeTerminationPolicy: None + expected: | + apiVersion: route.openshift.io/v1 + kind: Route + spec: + host: test.foo + to: + kind: Service + name: foo + weight: 100 + tls: + termination: passthrough + insecureEdgeTerminationPolicy: None + wildcardPolicy: None + - name: Should be able to create a Route with valid actions + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + metadata: + labels: + type: sharded + name: hello-openshift-actions + namespace: hello-openshift + spec: + subdomain: hello-openshift + tls: + termination: edge + to: + kind: Service + name: hello-openshift + httpHeaders: + actions: + response: + - name: X-Frame-Options + action: + type: Set + set: + value: DENY + - name: X-Cache-Info + action: + type: Set + set: + value: "not cacheable; meta data too large" + - name: X-XSS-Protection + action: + type: Delete + - name: X-Source + action: + type: Set + set: + value: "%[res.hdr(X-Value),lower]" + request: + - name: Content-Location + action: + type: Set + set: + value: /my-first-blog-post + - name: X-SSL-Client-Cert + action: + type: Set + set: + value: "%{+Q}[ssl_c_der,base64]" + - name: Content-Language + action: + type: Delete + - name: X-Target + action: + type: Set + set: + value: "%[req.hdr(host),lower]" + - name: X-Conditional + action: + type: Set + set: + value: "%[req.hdr(Host)] if foo" + - name: X-Condition + action: + type: Set + set: + value: 
"%[req.hdr(Host)]\ if\ foo" + expected: | + apiVersion: route.openshift.io/v1 + kind: Route + metadata: + labels: + type: sharded + name: hello-openshift-actions + namespace: hello-openshift + spec: + subdomain: hello-openshift + tls: + termination: edge + to: + kind: Service + name: hello-openshift + weight: 100 + wildcardPolicy: None + httpHeaders: + actions: + response: + - name: X-Frame-Options + action: + type: Set + set: + value: DENY + - name: X-Cache-Info + action: + type: Set + set: + value: "not cacheable; meta data too large" + - name: X-XSS-Protection + action: + type: Delete + - name: X-Source + action: + type: Set + set: + value: "%[res.hdr(X-Value),lower]" + request: + - name: Content-Location + action: + type: Set + set: + value: /my-first-blog-post + - name: X-SSL-Client-Cert + action: + type: Set + set: + value: "%{+Q}[ssl_c_der,base64]" + - name: Content-Language + action: + type: Delete + - name: X-Target + action: + type: Set + set: + value: "%[req.hdr(host),lower]" + - name: X-Conditional + action: + type: Set + set: + value: "%[req.hdr(Host)] if foo" + - name: X-Condition + action: + type: Set + set: + value: "%[req.hdr(Host)]\ if\ foo" + - name: "Should not allow response header actions if tls termination is set to passthrough" + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + metadata: + labels: + type: sharded + name: hello-openshift-passthrough + namespace: hello-openshift + spec: + subdomain: hello-openshift + tls: + termination: passthrough + to: + kind: Service + name: hello-openshift + httpHeaders: + actions: + response: + - name: X-Frame-Options + action: + type: Set + set: + value: DENY + - name: X-XSS-Protection + action: + type: Delete + expectedError: "header actions are not permitted when tls termination is passthrough." + - name: "Should not allow request header actions if tls termination is set to passthrough" + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + metadata: + labels: + type: sharded + name: hello-openshift-passthrough + namespace: hello-openshift + spec: + subdomain: hello-openshift + tls: + termination: passthrough + to: + kind: Service + name: hello-openshift + httpHeaders: + actions: + request: + - name: Content-Location + action: + type: Set + set: + value: /my-first-blog-post + - name: X-SSL-Client-Cert + action: + type: Set + set: + value: "%{+Q}[ssl_c_der,base64]" + - name: Content-Language + action: + type: Delete + - name: X-Target + action: + type: Set + set: + value: "%[req.hdr(host),lower]" + expectedError: "header actions are not permitted when tls termination is passthrough." + - name: Should not allow to set/delete HSTS header. + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + metadata: + labels: + type: sharded + name: hello-openshift-edge-hsts + namespace: hello-openshift + spec: + subdomain: hello-openshift + tls: + termination: edge + to: + kind: Service + name: hello-openshift + httpHeaders: + actions: + response: + - name: X-Frame-Options + action: + type: Set + set: + value: DENY + - name: Strict-Transport-Security + action: + type: Delete + request: + - name: Content-Location + action: + type: Set + set: + value: /my-first-blog-post + - name: Content-Language + action: + type: Delete + expectedError: "strict-transport-security header may not be modified via header actions" + - name: Should not allow to set proxy request header. 
+ initial: | + apiVersion: route.openshift.io/v1 + kind: Route + metadata: + name: hello-openshift-edge-proxy + namespace: hello-openshift + spec: + subdomain: hello-openshift + tls: + termination: edge + to: + kind: Service + name: hello-openshift + httpHeaders: + actions: + request: + - name: Proxy + action: + type: Set + set: + value: example.xyz + expectedError: "proxy header may not be modified via header actions" + - name: Should not allow to set cookie header. + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + metadata: + name: hello-openshift-edge-proxy + namespace: hello-openshift + spec: + subdomain: hello-openshift + tls: + termination: edge + to: + kind: Service + name: hello-openshift + httpHeaders: + actions: + request: + - name: Cookie + action: + type: Set + set: + value: "PHPSESSID=298zf09hf012fh2; csrftoken=u32t4o3tb3gg43; _gat=1" + expectedError: "cookie header may not be modified via header actions" + - name: Should not allow to set set-cookie header. + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + metadata: + name: hello-openshift-edge-proxy + namespace: hello-openshift + spec: + subdomain: hello-openshift + tls: + termination: edge + to: + kind: Service + name: hello-openshift + httpHeaders: + actions: + response: + - name: Set-Cookie + action: + type: Set + set: + value: "sessionId=e8bb43229de9; Domain=foo.example.com" + expectedError: "set-cookie header may not be modified via header actions" + - name: Should not allow to set/delete dynamic headers with unclosed braces. + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + metadata: + labels: + type: sharded + name: hello-openshift-edge-unclosed-braces + namespace: hello-openshift + spec: + subdomain: hello-openshift + tls: + termination: edge + to: + kind: Service + name: hello-openshift + httpHeaders: + actions: + request: + - name: Content-Location + action: + type: Set + set: + value: /my-first-blog-post + - name: Content-Language + action: + type: Delete + - name: expires + action: + type: Set + set: + value: "%[req.hdr(host),lower" + expectedError: "Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are req.hdr, ssl_c_der. Converters allowed are lower, base64." + - name: Should not allow to set dynamic response header values with not allowed sample fetchers. + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + metadata: + labels: + type: sharded + name: hello-openshift-edge-not-allowed-values + namespace: hello-openshift + spec: + subdomain: hello-openshift + tls: + termination: edge + to: + kind: Service + name: hello-openshift + httpHeaders: + actions: + response: + - name: X-Target + action: + type: Set + set: + value: "%{+Q}[ssl_c_der1,base64]" + expectedError: "Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. 
The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are res.hdr, ssl_c_der. Converters allowed are lower, base64." + - name: Should not allow to set/delete dynamic response header values with not allowed converters. + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + metadata: + labels: + type: sharded + name: hello-openshift-edge-not-allowed-values + namespace: hello-openshift + spec: + subdomain: hello-openshift + tls: + termination: edge + to: + kind: Service + name: hello-openshift + httpHeaders: + actions: + response: + - name: X-Target + action: + type: Set + set: + value: "%{+Q}[ssl_c_der,bogus]" + expectedError: "Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are res.hdr, ssl_c_der. Converters allowed are lower, base64." + - name: Should not allow to set/delete dynamic response header values containing req.hdr fetcher. + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + metadata: + labels: + type: sharded + name: hello-openshift-edge-not-allowed-values + namespace: hello-openshift + spec: + subdomain: hello-openshift + tls: + termination: edge + to: + kind: Service + name: hello-openshift + httpHeaders: + actions: + response: + - name: X-Target + action: + type: Set + set: + value: "%[req.hdr(host),lower]" + expectedError: "Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are res.hdr, ssl_c_der. Converters allowed are lower, base64." + - name: Should not allow to set/delete dynamic request header values containing res.hdr fetcher. + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + metadata: + labels: + type: sharded + name: hello-openshift-edge-not-allowed-values + namespace: hello-openshift + spec: + subdomain: hello-openshift + tls: + termination: edge + to: + kind: Service + name: hello-openshift + httpHeaders: + actions: + request: + - name: X-Source + action: + type: Set + set: + value: "%[res.hdr(X-Value),lower]" + expectedError: "Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are req.hdr, ssl_c_der. Converters allowed are lower, base64."
+ - name: Should not allow to set/delete dynamic request header values with not allowed converters. + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + metadata: + labels: + type: sharded + name: hello-openshift-edge-not-allowed-values + namespace: hello-openshift + spec: + subdomain: hello-openshift + tls: + termination: edge + to: + kind: Service + name: hello-openshift + httpHeaders: + actions: + request: + - name: X-SSL-Client-Cert + action: + type: Set + set: + value: "%{+Q}[ssl_c_der,bogus]" + - name: Content-Language + action: + type: Delete + expectedError: "Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are req.hdr, ssl_c_der. Converters allowed are lower, base64." + - name: Should not allow to set dynamic request header values with not allowed sample fetchers. + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + metadata: + labels: + type: sharded + name: hello-openshift-edge-not-allowed-values + namespace: hello-openshift + spec: + subdomain: hello-openshift + tls: + termination: edge + to: + kind: Service + name: hello-openshift + httpHeaders: + actions: + request: + - name: X-SSL-Client-Cert + action: + type: Set + set: + value: "%{+Q}[ssl_c_der1122,base64]" + - name: Content-Language + action: + type: Delete + expectedError: "Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are req.hdr, ssl_c_der. Converters allowed are lower, base64." 
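+    # NOTE (editor's illustration, not part of the upstream suite): across the
+    # cases above and below, the request-side and response-side CEL regexes
+    # differ only in the sample fetcher they accept (req.hdr for request
+    # actions, res.hdr for response actions); ssl_c_der and the converters
+    # lower and base64 are accepted on both sides, which is why
+    # "%{+Q}[ssl_c_der,base64]" appears in both valid and invalid cases.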
+ - name: Should not allow empty value in request + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + metadata: + labels: + type: sharded + name: hello-openshift-edge-not-allowed-values + namespace: hello-openshift + spec: + subdomain: hello-openshift + tls: + termination: edge + to: + kind: Service + name: hello-openshift + httpHeaders: + actions: + request: + - name: X-SSL-Client-Cert + action: + type: Set + set: + value: + expectedError: 'Route.route.openshift.io "hello-openshift-edge-not-allowed-values" is invalid: [spec.httpHeaders.actions.request[0].action.set.value: Required value, : Invalid value: "null": some validation rules were not checked because the object was invalid; correct the existing errors to complete validation]' + - name: Should not allow empty value in response + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + metadata: + labels: + type: sharded + name: hello-openshift-edge-not-allowed-values + namespace: hello-openshift + spec: + subdomain: hello-openshift + tls: + termination: edge + to: + kind: Service + name: hello-openshift + httpHeaders: + actions: + response: + - name: X-SSL-Client-Cert + action: + type: Set + set: + value: + expectedError: 'Route.route.openshift.io "hello-openshift-edge-not-allowed-values" is invalid: [spec.httpHeaders.actions.response[0].action.set.value: Required value, : Invalid value: "null": some validation rules were not checked because the object was invalid; correct the existing errors to complete validation]' + - name: Should be required to specify the set field when the discriminant type is Set. + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + metadata: + labels: + type: sharded + name: hello-openshift-actions + namespace: hello-openshift + spec: + subdomain: hello-openshift + tls: + termination: edge + to: + kind: Service + name: hello-openshift + httpHeaders: + actions: + response: + - name: X-Frame-Options + action: + type: Set + expectedError: "set is required when type is Set, and forbidden otherwise" + - name: Should be required to specify the set field when the discriminant type is Set. 
+ initial: | + apiVersion: route.openshift.io/v1 + kind: Route + metadata: + labels: + type: sharded + name: hello-openshift-actions + namespace: hello-openshift + spec: + subdomain: hello-openshift + tls: + termination: edge + to: + kind: Service + name: hello-openshift + httpHeaders: + actions: + response: + - name: X-Frame-Options + action: + set: + value: DENY + expectedError: 'Route.route.openshift.io "hello-openshift-actions" is invalid: [spec.httpHeaders.actions.response[0].action.type: Required value, : Invalid value: "null": some validation rules were not checked because the object was invalid; correct the existing errors to complete validation]' diff --git a/vendor/github.com/openshift/api/route/v1/techpreview.route.testsuite.yaml b/vendor/github.com/openshift/api/route/v1/techpreview.route.testsuite.yaml new file mode 100644 index 000000000..0f0cdd11b --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/techpreview.route.testsuite.yaml @@ -0,0 +1,103 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: '[TechPreview] Route' +crd: route-TechPreviewNoUpgrade.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal Route + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + spec: + to: + kind: Service + name: foo + expected: | + apiVersion: route.openshift.io/v1 + kind: Route + spec: + to: + kind: Service + name: foo + weight: 100 + wildcardPolicy: None + - name: 'cannot have both spec.tls.termination: passthrough and spec.tls.insecureEdgeTerminationPolicy: Allow' + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + spec: + to: + kind: Service + name: foo + tls: + termination: passthrough + insecureEdgeTerminationPolicy: Allow + expectedError: 'cannot have both spec.tls.termination: passthrough and spec.tls.insecureEdgeTerminationPolicy: Allow' + - name: 'spec.tls.termination: passthrough is compatible with spec.tls.insecureEdgeTerminationPolicy: Redirect' + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + spec: + host: test.foo + to: + kind: Service + name: foo + tls: + termination: passthrough + insecureEdgeTerminationPolicy: Redirect + expected: | + apiVersion: route.openshift.io/v1 + kind: Route + spec: + host: test.foo + to: + kind: Service + name: foo + weight: 100 + tls: + termination: passthrough + insecureEdgeTerminationPolicy: Redirect + wildcardPolicy: None + - name: 'spec.tls.termination: passthrough is compatible with spec.tls.insecureEdgeTerminationPolicy: None' + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + spec: + host: test.foo + to: + kind: Service + name: foo + tls: + termination: passthrough + insecureEdgeTerminationPolicy: None + expected: | + apiVersion: route.openshift.io/v1 + kind: Route + spec: + host: test.foo + to: + kind: Service + name: foo + weight: 100 + tls: + termination: passthrough + insecureEdgeTerminationPolicy: None + wildcardPolicy: None + - name: 'cannot have both spec.tls.certificate and spec.tls.externalCertificate' + initial: | + apiVersion: route.openshift.io/v1 + kind: Route + spec: + to: + kind: Service + name: foo + tls: + termination: edge + key: |- + -----BEGIN RSA PRIVATE KEY----- + -----END RSA PRIVATE KEY----- + certificate: |- + -----BEGIN CERTIFICATE----- + -----END CERTIFICATE----- + externalCertificate: + name: "my-local-secret" + expectedError: 'Invalid value: "object": cannot have both spec.tls.certificate and spec.tls.externalCertificate' diff --git 
a/vendor/github.com/openshift/api/route/v1/test-route-validation.sh b/vendor/github.com/openshift/api/route/v1/test-route-validation.sh new file mode 100644 index 000000000..f1192d4a1 --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/test-route-validation.sh @@ -0,0 +1,476 @@ +#!/bin/bash + +# This shell script runs a series of `oc` commands to create various OpenShift +# route objects, some invalid and some valid, and verifies that the API rejects +# the invalid ones and admits the valid ones. Note that this script does not +# verify defaulting behavior and does not examine the rejection reason; it only +# checks whether the `oc create` command succeeds or fails. This script +# requires a cluster and a kubeconfig in a location where oc will find it. + +set -uo pipefail + +expect_pass() { + rc=$? + if [[ $rc != 0 ]] + then + tput setaf 1 + echo "expected success: $*, got exit code $rc" + tput sgr0 + exit 1 + fi + tput setaf 2 + echo "got expected success: $*" + tput sgr0 +} + +expect_fail() { + rc=$? + if [[ $rc = 0 ]] + then + tput setaf 1 + echo "expected failure: $*, got exit code $rc" + exit 1 + fi + tput setaf 2 + echo "got expected failure: $*" + tput sgr0 +} + +delete_route() { + oc -n openshift-ingress delete routes.route/testroute || exit 1 +} + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + path: / + tls: + termination: passthrough + to: + kind: Service + name: router-internal-default +EOF +expect_fail 'passthrough with nonempty path' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + path: / + to: + kind: Service + name: router-internal-default +EOF +expect_pass 'non-TLS with nonempty path' +delete_route + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + path: / + tls: + termination: edge + to: + kind: Service + name: router-internal-default +EOF +expect_pass 'edge-terminated with nonempty path' +delete_route + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + path: x + tls: + termination: edge + to: + kind: Service + name: router-internal-default +EOF +expect_fail 'path starting with non-slash character' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + to: + kind: Service + name: router-internal-default + wildcardPolicy: Subdomain +EOF +expect_fail 'spec.wildcardPolicy: Subdomain requires a nonempty value for spec.host' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + port: + targetPort: "" +EOF +expect_fail 'cannot have empty spec.port.targetPort' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + port: + targetPort: 0 +EOF +expect_fail 'cannot have numeric 0 value for spec.port.targetPort' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + 
name: router-internal-default + port: + targetPort: "0" +EOF +expect_pass 'can have string "0" value for spec.port.targetPort' +delete_route + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + port: + targetPort: 1 +EOF +expect_pass 'can have numeric 1 value for spec.port.targetPort' +delete_route + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + port: + targetPort: x +EOF +expect_pass 'can have string "x" value for spec.port.targetPort' +delete_route + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + tls: + termination: passthrough + to: + kind: Nonsense + name: router-internal-default +EOF +expect_fail 'nonsense value for spec.to.kind' + + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + tls: + termination: passthrough + to: + kind: Service + name: "" +EOF +expect_fail 'spec.to.name cannot be empty' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + weight: -1 +EOF +expect_fail 'spec.to.weight cannot be negative' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + weight: 300 +EOF +expect_fail 'spec.to.weight cannot exceed 256' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + weight: 100 +EOF +expect_pass 'spec.to.weight has a valid value' +delete_route + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + alternateBackends: + - name: router-internal-default + - name: router-internal-default + - name: router-internal-default + - name: router-internal-default +EOF +expect_fail 'cannot have >3 values under spec.alternateBackends' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + alternateBackends: + - name: router-internal-default + - name: "" + - name: router-internal-default +EOF +expect_fail 'cannot have empty spec.alternateBackends[*].name' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + alternateBackends: + - name: router-internal-default + - name: router-internal-default + - name: router-internal-default +EOF +expect_pass 'valid spec.alternateBackends' +delete_route + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + tls: + termination: passthrough + certificate: "x" +EOF 
+expect_fail 'cannot have both spec.tls.termination: passthrough and nonempty spec.tls.certificate' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + tls: + termination: passthrough + key: "x" +EOF +expect_fail 'cannot have both spec.tls.termination: passthrough and nonempty spec.tls.key' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + tls: + termination: passthrough + caCertificate: "x" +EOF +expect_fail 'cannot have both spec.tls.termination: passthrough and nonempty spec.tls.caCertificate' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + tls: + termination: passthrough + destinationCACertificate: "x" +EOF +expect_fail 'cannot have both spec.tls.termination: passthrough and nonempty spec.tls.destinationCACertificate' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + tls: + termination: edge + destinationCACertificate: "x" +EOF +expect_fail 'cannot have both spec.tls.termination: edge and nonempty spec.tls.destinationCACertificate' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + tls: + termination: edge + insecureEdgeTerminationPolicy: nonsense +EOF +expect_fail 'cannot have nonsense value for spec.tls.insecureEdgeTerminationPolicy' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + tls: + termination: passthrough + insecureEdgeTerminationPolicy: Allow +EOF +expect_fail 'cannot have both spec.tls.termination: passthrough and spec.tls.insecureEdgeTerminationPolicy: Allow' + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + tls: + termination: passthrough + insecureEdgeTerminationPolicy: Redirect +EOF +expect_pass 'spec.tls.termination: passthrough is compatible with spec.tls.insecureEdgeTerminationPolicy: Redirect' +delete_route + +oc create -f - <<'EOF' +apiVersion: route.openshift.io/v1 +kind: Route +metadata: + namespace: openshift-ingress + name: testroute +spec: + host: test.foo + to: + name: router-internal-default + tls: + termination: passthrough + insecureEdgeTerminationPolicy: None +EOF +expect_pass 'spec.tls.termination: passthrough is compatible with spec.tls.insecureEdgeTerminationPolicy: None' +delete_route diff --git a/vendor/github.com/openshift/api/route/v1/types.go b/vendor/github.com/openshift/api/route/v1/types.go new file mode 100644 index 000000000..2de728bc0 --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/types.go @@ -0,0 +1,537 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// +genclient +// 
+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+
+// A route allows developers to expose services through an HTTP(S) aware load balancing and proxy
+// layer via a public DNS entry. The route may further specify TLS options and a certificate, or
+// specify a public CNAME that the router should also accept for HTTP and HTTPS traffic. An
+// administrator typically configures their router to be visible outside the cluster firewall, and
+// may also add additional security, caching, or traffic controls on the service content. Routers
+// usually talk directly to the service endpoints.
+//
+// Once a route is created, the `host` field may not be changed. Generally, routers use the oldest
+// route with a given host when resolving conflicts.
+//
+// Routers are subject to additional customization and may support additional controls via the
+// annotations field.
+//
+// Because administrators may configure multiple routers, the route status field is used to
+// return information to clients about the names and states of the route under each router.
+// If a client chooses a duplicate name, for instance, the route status conditions are used
+// to indicate the route cannot be chosen.
+//
+// Enabling HTTP/2 ALPN on a route requires a custom
+// (non-wildcard) certificate. This prevents connection coalescing by
+// clients, notably web browsers. We do not support HTTP/2 ALPN on
+// routes that use the default certificate because of the risk of
+// connection re-use/coalescing. Routes that do not have their own
+// custom certificate will not be HTTP/2 ALPN-enabled on either the
+// frontend or the backend.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type Route struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// spec is the desired state of the route
+	// +kubebuilder:validation:XValidation:rule="!has(self.tls) || self.tls.termination != 'passthrough' || !has(self.httpHeaders)",message="header actions are not permitted when tls termination is passthrough."
+	Spec RouteSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"`
+	// status is the current state of the route
+	// +optional
+	Status RouteStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// RouteList is a collection of Routes.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+type RouteList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// items is a list of routes
+	Items []Route `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
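+
+// NOTE (illustrative sketch, not part of the upstream API): given the types
+// below, a minimal Route mirroring the "minimal Route" case in
+// stable.route.testsuite.yaml could be built in Go as follows; the names
+// "frontend" and "demo" are placeholders.
+//
+//	route := Route{
+//		ObjectMeta: metav1.ObjectMeta{Name: "frontend", Namespace: "demo"},
+//		Spec: RouteSpec{
+//			To: RouteTargetReference{Kind: "Service", Name: "frontend"},
+//		},
+//	}
+//
+// On admission the schema defaults spec.to.weight to 100 and
+// spec.wildcardPolicy to None, exactly as the testsuite expects.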
+
+// RouteSpec describes the hostname or path the route exposes, any security information,
+// and one to four backends (services) the route points to. Requests are distributed
+// among the backends depending on the weights assigned to each backend. When using
+// roundrobin scheduling the portion of requests that go to each backend is the backend
+// weight divided by the sum of all of the backend weights. When the backend has more than
+// one endpoint the requests that end up on the backend are roundrobin distributed among
+// the endpoints. Weights are between 0 and 256 with default 100. Weight 0 causes no requests
+// to the backend. If all weights are zero the route will be considered to have no backends
+// and return a standard 503 response.
+//
+// The `tls` field is optional and allows specific certificates or behavior for the
+// route. Routers typically configure a default certificate on a wildcard domain to
+// terminate routes without explicit certificates, but custom hostnames usually must
+// choose passthrough (send traffic directly to the backend via the TLS Server-Name-
+// Indication field) or provide a certificate.
+type RouteSpec struct {
+	// host is an alias/DNS that points to the service. Optional.
+	// If not specified a route name will typically be automatically
+	// chosen.
+	// Must follow DNS952 subdomain conventions.
+	//
+	// +optional
+	// +kubebuilder:validation:MaxLength=253
+	// +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`
+	Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"`
+	// subdomain is a DNS subdomain that is requested within the ingress controller's
+	// domain (as a subdomain). If host is set this field is ignored. An ingress
+	// controller may choose to ignore this suggested name, in which case the controller
+	// will report the assigned name in the status.ingress array or refuse to admit the
+	// route. If this value is set and the server does not support this field host will
+	// be populated automatically. Otherwise host is left empty. The field may have
+	// multiple parts separated by a dot, but not all ingress controllers may honor
+	// the request. This field may not be changed after creation except by a user with
+	// the update routes/custom-host permission.
+	//
+	// Example: subdomain `frontend` automatically receives the router subdomain
+	// `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`.
+	//
+	// +optional
+	// +kubebuilder:validation:MaxLength=253
+	// +kubebuilder:validation:Pattern=`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`
+	Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,8,opt,name=subdomain"`
+
+	// path that the router watches for, to route traffic to the service. Optional
+	//
+	// +optional
+	// +kubebuilder:validation:Pattern=`^/`
+	Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"`
+
+	// to is an object the route should use as the primary backend. Only the Service kind
+	// is allowed, and it will be defaulted to Service. If the weight field (0-256 default 100)
+	// is set to zero, no traffic will be sent to this backend.
+	To RouteTargetReference `json:"to" protobuf:"bytes,3,opt,name=to"`
+
+	// alternateBackends allows up to 3 additional backends to be assigned to the route.
+	// Only the Service kind is allowed, and it will be defaulted to Service.
+	// Use the weight field in RouteTargetReference object to specify relative preference.
+	//
+	// +kubebuilder:validation:MaxItems=3
+	AlternateBackends []RouteTargetReference `json:"alternateBackends,omitempty" protobuf:"bytes,4,rep,name=alternateBackends"`
+
+	// If specified, the port to be used by the router. Most routers will use all
+	// endpoints exposed by the service by default - set this value to instruct routers
+	// which port to use.
+	Port *RoutePort `json:"port,omitempty" protobuf:"bytes,5,opt,name=port"`
+
+	// The tls field provides the ability to configure certificates and termination for the route.
+	TLS *TLSConfig `json:"tls,omitempty" protobuf:"bytes,6,opt,name=tls"`
+
+	// Wildcard policy if any for the route.
+	// Currently only 'Subdomain' or 'None' is allowed.
+	//
+	// +kubebuilder:validation:Enum=None;Subdomain;""
+	// +kubebuilder:default=None
+	WildcardPolicy WildcardPolicyType `json:"wildcardPolicy,omitempty" protobuf:"bytes,7,opt,name=wildcardPolicy"`
+
+	// httpHeaders defines policy for HTTP headers.
+	//
+	// +optional
+	HTTPHeaders *RouteHTTPHeaders `json:"httpHeaders,omitempty" protobuf:"bytes,9,opt,name=httpHeaders"`
+}
+
+// RouteHTTPHeaders defines policy for HTTP headers.
+type RouteHTTPHeaders struct {
+	// actions specifies options for modifying headers and their values.
+	// Note that this option only applies to cleartext HTTP connections
+	// and to secure HTTP connections for which the ingress controller
+	// terminates encryption (that is, edge-terminated or reencrypt
+	// connections). Headers cannot be modified for TLS passthrough
+	// connections.
+	// Setting the HSTS (`Strict-Transport-Security`) header is not supported via actions.
+	// `Strict-Transport-Security` may only be configured using the "haproxy.router.openshift.io/hsts_header"
+	// route annotation, and only in accordance with the policy specified in Ingress.Spec.RequiredHSTSPolicies.
+	// In case of HTTP request headers, the actions specified in spec.httpHeaders.actions on the Route will be executed after
+	// the actions specified in the IngressController's spec.httpHeaders.actions field.
+	// In case of HTTP response headers, the actions specified in spec.httpHeaders.actions on the IngressController will be
+	// executed after the actions specified in the Route's spec.httpHeaders.actions field.
+	// The headers set via this API will not appear in access logs.
+	// Any actions defined here are applied after any actions related to the following other fields:
+	// cache-control, spec.clientTLS,
+	// spec.httpHeaders.forwardedHeaderPolicy, spec.httpHeaders.uniqueId,
+	// and spec.httpHeaders.headerNameCaseAdjustments.
+	// The following header names are reserved and may not be modified via this API:
+	// Strict-Transport-Security, Proxy, Cookie, Set-Cookie.
+	// Note that the total size of all net added headers *after* interpolating dynamic values
+	// must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the
+	// IngressController. Please refer to the documentation
+	// for that API field for more details.
+	// +optional
+	Actions RouteHTTPHeaderActions `json:"actions,omitempty" protobuf:"bytes,1,opt,name=actions"`
+}
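+
+// NOTE (illustrative sketch, not part of the upstream API): a hypothetical
+// spec.httpHeaders value that sets one response header and deletes another,
+// matching the "valid actions" cases in the testsuite:
+//
+//	headers := &RouteHTTPHeaders{
+//		Actions: RouteHTTPHeaderActions{
+//			Response: []RouteHTTPHeader{
+//				{Name: "X-Frame-Options", Action: RouteHTTPHeaderActionUnion{Type: Set, Set: &RouteSetHTTPHeader{Value: "DENY"}}},
+//				{Name: "X-XSS-Protection", Action: RouteHTTPHeaderActionUnion{Type: Delete}},
+//			},
+//		},
+//	}
+//
+// Remember that header actions are rejected outright when spec.tls.termination
+// is passthrough (see the CEL rule on Route.Spec above).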
+
+// RouteHTTPHeaderActions defines configuration for actions on HTTP request and response headers.
+type RouteHTTPHeaderActions struct {
+	// response is a list of HTTP response headers to modify.
+	// Currently, actions may either `Set` or `Delete` header values.
+	// Actions defined here will modify the response headers of all requests made through a route.
+	// These actions are applied to a specific Route defined within a cluster i.e. connections made through a route.
+	// Route actions will be executed before IngressController actions for response headers.
+	// Actions are applied in sequence as defined in this list.
+	// A maximum of 20 response header actions may be configured.
+	// You can use this field to specify HTTP response headers that should be set or deleted
+	// when forwarding responses from your application to the client.
+	// Sample fetchers allowed are "res.hdr" and "ssl_c_der".
+	// Converters allowed are "lower" and "base64".
+	// Example header values: "%[res.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]".
+	// Note: This field cannot be used if your route uses TLS passthrough.
+	// + ---
+	// + Note: Any change to regex mentioned below must be reflected in the CRD validation of route in https://github.com/openshift/library-go/blob/master/pkg/route/validation/validation.go and vice-versa.
+	// +listType=map
+	// +listMapKey=name
+	// +optional
+	// +kubebuilder:validation:MaxItems=20
+	// +kubebuilder:validation:XValidation:rule=`self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:res\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$')))`,message="Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are res.hdr, ssl_c_der. Converters allowed are lower, base64."
+	Response []RouteHTTPHeader `json:"response" protobuf:"bytes,1,rep,name=response"`
+	// request is a list of HTTP request headers to modify.
+	// Currently, actions may either `Set` or `Delete` header values.
+	// Actions defined here will modify the request headers of all requests made through a route.
+	// These actions are applied to a specific Route defined within a cluster i.e. connections made through a route.
+	// Route actions will be executed after IngressController actions for request headers.
+	// Actions are applied in sequence as defined in this list.
+	// A maximum of 20 request header actions may be configured.
+	// You can use this field to specify HTTP request headers that should be set or deleted
+	// when forwarding connections from the client to your application.
+	// Sample fetchers allowed are "req.hdr" and "ssl_c_der".
+	// Converters allowed are "lower" and "base64".
+	// Example header values: "%[req.hdr(X-target),lower]", "%{+Q}[ssl_c_der,base64]".
+	// Any request header configuration applied directly via a Route resource using this API
+	// will override header configuration for a header of the same name applied via
+	// spec.httpHeaders.actions on the IngressController or route annotation.
+	// Note: This field cannot be used if your route uses TLS passthrough.
+	// + ---
+	// + Note: Any change to regex mentioned below must be reflected in the CRD validation of route in https://github.com/openshift/library-go/blob/master/pkg/route/validation/validation.go and vice-versa.
+	// +listType=map
+	// +listMapKey=name
+	// +optional
+	// +kubebuilder:validation:MaxItems=20
+	// +kubebuilder:validation:XValidation:rule=`self.all(key, key.action.type == "Delete" || (has(key.action.set) && key.action.set.value.matches('^(?:%(?:%|(?:\\{[-+]?[QXE](?:,[-+]?[QXE])*\\})?\\[(?:req\\.hdr\\([0-9A-Za-z-]+\\)|ssl_c_der)(?:,(?:lower|base64))*\\])|[^%[:cntrl:]])+$')))`,message="Either the header value provided is not in correct format or the sample fetcher/converter specified is not allowed. The dynamic header value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. Sample fetchers allowed are req.hdr, ssl_c_der. Converters allowed are lower, base64."
+	Request []RouteHTTPHeader `json:"request" protobuf:"bytes,2,rep,name=request"`
+}
+
+// RouteHTTPHeader specifies configuration for setting or deleting an HTTP header.
+type RouteHTTPHeader struct {
+	// name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header
+	// name as defined in RFC 2616 section 4.2.
+	// The name must consist only of alphanumeric and the following special characters, "-!#$%&'*+.^_`".
+	// The following header names are reserved and may not be modified via this API:
+	// Strict-Transport-Security, Proxy, Cookie, Set-Cookie.
+	// It must be no more than 255 characters in length.
+	// Header name must be unique.
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=255
+	// +kubebuilder:validation:Pattern="^[-!#$%&'*+.0-9A-Z^_`a-z|~]+$"
+	// +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'strict-transport-security'",message="strict-transport-security header may not be modified via header actions"
+	// +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'proxy'",message="proxy header may not be modified via header actions"
+	// +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'cookie'",message="cookie header may not be modified via header actions"
+	// +kubebuilder:validation:XValidation:rule="self.lowerAscii() != 'set-cookie'",message="set-cookie header may not be modified via header actions"
+	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+
+	// action specifies actions to perform on headers, such as setting or deleting headers.
+	// +kubebuilder:validation:Required
+	Action RouteHTTPHeaderActionUnion `json:"action" protobuf:"bytes,2,opt,name=action"`
+}
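+
+// NOTE (illustrative, not part of the upstream API): the union type below is
+// guarded by a CEL rule, so a Set action must carry a payload and a Delete
+// action must not:
+//
+//	ok := RouteHTTPHeaderActionUnion{Type: Set, Set: &RouteSetHTTPHeader{Value: "DENY"}}
+//	bad := RouteHTTPHeaderActionUnion{Type: Set} // rejected: "set is required when type is Set, and forbidden otherwise"
+//
+// The stable testsuite exercises the second case.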
+
+// RouteHTTPHeaderActionUnion specifies an action to take on an HTTP header.
+// +kubebuilder:validation:XValidation:rule="has(self.type) && self.type == 'Set' ? has(self.set) : !has(self.set)",message="set is required when type is Set, and forbidden otherwise"
+// +union
+type RouteHTTPHeaderActionUnion struct {
+	// type defines the type of the action to be applied on the header.
+	// Possible values are Set or Delete.
+	// Set allows you to set HTTP request and response headers.
+	// Delete allows you to delete HTTP request and response headers.
+	// +unionDiscriminator
+	// +kubebuilder:validation:Enum:=Set;Delete
+	// +kubebuilder:validation:Required
+	Type RouteHTTPHeaderActionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=RouteHTTPHeaderActionType"`
+
+	// set defines the HTTP header that should be set: added if it doesn't exist or replaced if it does.
+	// This field is required when type is Set and forbidden otherwise.
+	// +optional
+	// +unionMember
+	Set *RouteSetHTTPHeader `json:"set,omitempty" protobuf:"bytes,2,opt,name=set"`
+}
+
+// RouteSetHTTPHeader specifies what value needs to be set on an HTTP header.
+type RouteSetHTTPHeader struct {
+	// value specifies a header value.
+	// Dynamic values can be added. The value will be interpreted as an HAProxy format string as defined in
+	// http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and
+	// otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2.
+	// The value of this field must be no more than 16384 characters in length.
+	// Note that the total size of all net added headers *after* interpolating dynamic values
+	// must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the
+	// IngressController.
+	// + ---
+	// + Note: This limit was selected as most common web servers have a limit of 16384 characters or some lower limit.
+	// + See .
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:MinLength=1
+	// +kubebuilder:validation:MaxLength=16384
+	Value string `json:"value" protobuf:"bytes,1,opt,name=value"`
+}
+
+// RouteHTTPHeaderActionType defines actions that can be performed on HTTP headers.
+type RouteHTTPHeaderActionType string
+
+const (
+	// Set specifies that an HTTP header should be set.
+	Set RouteHTTPHeaderActionType = "Set"
+	// Delete specifies that an HTTP header should be deleted.
+	Delete RouteHTTPHeaderActionType = "Delete"
+)
+
+// RouteTargetReference specifies the target that resolves into endpoints. Only the 'Service'
+// kind is allowed. Use 'weight' field to emphasize one over others.
+type RouteTargetReference struct {
+	// The kind of target that the route is referring to. Currently, only 'Service' is allowed
+	//
+	// +kubebuilder:validation:Enum=Service;""
+	// +kubebuilder:default=Service
+	Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"`
+
+	// name of the service/target that is being referred to. e.g. name of the service
+	//
+	// +kubebuilder:validation:MinLength=1
+	Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
+
+	// weight as an integer between 0 and 256, default 100, that specifies the target's relative weight
+	// against other target reference objects. 0 suppresses requests to this backend.
+	//
+	// +optional
+	// +kubebuilder:validation:Minimum=0
+	// +kubebuilder:validation:Maximum=256
+	// +kubebuilder:default=100
+	Weight *int32 `json:"weight" protobuf:"varint,3,opt,name=weight"`
+}
+
+// RoutePort defines a port mapping from a router to an endpoint in the service endpoints.
+type RoutePort struct {
+	// The target port on pods selected by the service this route points to.
+	// If this is a string, it will be looked up as a named port in the target
+	// endpoints port list. Required
+	TargetPort intstr.IntOrString `json:"targetPort" protobuf:"bytes,1,opt,name=targetPort"`
+}
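+
+// NOTE (illustrative sketch, not part of the upstream API): the schema patch
+// in route.crd.yaml-patch forbids a targetPort of integer 0 or the empty
+// string, while the string "0", other integers, and named ports are admitted,
+// as test-route-validation.sh exercises:
+//
+//	p1 := RoutePort{TargetPort: intstr.FromString("https")} // named port: admitted
+//	p2 := RoutePort{TargetPort: intstr.FromInt(8443)}       // nonzero integer: admitted
+//	p3 := RoutePort{TargetPort: intstr.FromInt(0)}          // rejected by the schema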
+
+// RouteStatus provides relevant info about the status of a route, including which routers
+// acknowledge it.
+type RouteStatus struct {
+	// ingress describes the places where the route may be exposed. The list of
+	// ingress points may contain duplicate Host or RouterName values. Routes
+	// are considered live once they are `Ready`
+	Ingress []RouteIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"`
+}
+
+// RouteIngress holds information about the places where a route is exposed.
+type RouteIngress struct {
+	// Host is the host string under which the route is exposed; this value is required
+	Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"`
+	// Name is a name chosen by the router to identify itself; this value is required
+	RouterName string `json:"routerName,omitempty" protobuf:"bytes,2,opt,name=routerName"`
+	// Conditions is the state of the route, may be empty.
+	Conditions []RouteIngressCondition `json:"conditions,omitempty" protobuf:"bytes,3,rep,name=conditions"`
+	// Wildcard policy is the wildcard policy that was allowed where this route is exposed.
+	WildcardPolicy WildcardPolicyType `json:"wildcardPolicy,omitempty" protobuf:"bytes,4,opt,name=wildcardPolicy"`
+	// CanonicalHostname is the external host name for the router that can be used as a CNAME
+	// for the host requested for this route. This value is optional and may not be set in all cases.
+	RouterCanonicalHostname string `json:"routerCanonicalHostname,omitempty" protobuf:"bytes,5,opt,name=routerCanonicalHostname"`
+}
+
+// RouteIngressConditionType is a valid value for RouteCondition
+type RouteIngressConditionType string
+
+// These are valid conditions of a route.
+const (
+	// RouteAdmitted means the route is able to service requests for the provided Host
+	RouteAdmitted RouteIngressConditionType = "Admitted"
+	// TODO: add other route condition types
+)
+
+// RouteIngressCondition contains details for the current condition of this route on a particular
+// router.
+type RouteIngressCondition struct {
+	// Type is the type of the condition.
+	// Currently only Admitted.
+	Type RouteIngressConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=RouteIngressConditionType"`
+	// Status is the status of the condition.
+	// Can be True, False, Unknown.
+	Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"`
+	// (brief) reason for the condition's last transition, and is usually a machine and human
+	// readable constant
+	Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"`
+	// Human readable message indicating details about last transition.
+	Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"`
+	// RFC 3339 date and time when this condition last transitioned
+	LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,5,opt,name=lastTransitionTime"`
+}
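+
+// NOTE (illustrative sketch, not part of the upstream API): a client that
+// wants to know whether any router admitted a route might scan the status
+// like this; corev1 is the k8s.io/api/core/v1 import at the top of this file.
+//
+//	func isAdmitted(r *Route) bool {
+//		for _, ing := range r.Status.Ingress {
+//			for _, c := range ing.Conditions {
+//				if c.Type == RouteAdmitted && c.Status == corev1.ConditionTrue {
+//					return true
+//				}
+//			}
+//		}
+//		return false
+//	}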
+ ShardName string `json:"shardName" protobuf:"bytes,1,opt,name=shardName"` + + // dnsSuffix for the shard ala: shard-1.v3.openshift.com + DNSSuffix string `json:"dnsSuffix" protobuf:"bytes,2,opt,name=dnsSuffix"` +} + +// TLSConfig defines config used to secure a route and provide termination +// +// +kubebuilder:validation:XValidation:rule="has(self.termination) && has(self.insecureEdgeTerminationPolicy) ? !((self.termination=='passthrough') && (self.insecureEdgeTerminationPolicy=='Allow')) : true", message="cannot have both spec.tls.termination: passthrough and spec.tls.insecureEdgeTerminationPolicy: Allow" +// +openshift:validation:FeatureSetAwareXValidation:featureSet=TechPreviewNoUpgrade;CustomNoUpgrade,rule="!(has(self.certificate) && has(self.externalCertificate))", message="cannot have both spec.tls.certificate and spec.tls.externalCertificate" +type TLSConfig struct { + // termination indicates termination type. + // + // * edge - TLS termination is done by the router and http is used to communicate with the backend (default) + // * passthrough - Traffic is sent straight to the destination without the router providing TLS termination + // * reencrypt - TLS termination is done by the router and https is used to communicate with the backend + // + // Note: passthrough termination is incompatible with httpHeader actions + // +kubebuilder:validation:Enum=edge;reencrypt;passthrough + Termination TLSTerminationType `json:"termination" protobuf:"bytes,1,opt,name=termination,casttype=TLSTerminationType"` + + // certificate provides certificate contents. This should be a single serving certificate, not a certificate + // chain. Do not include a CA certificate. + Certificate string `json:"certificate,omitempty" protobuf:"bytes,2,opt,name=certificate"` + + // key provides key file contents + Key string `json:"key,omitempty" protobuf:"bytes,3,opt,name=key"` + + // caCertificate provides the cert authority certificate contents + CACertificate string `json:"caCertificate,omitempty" protobuf:"bytes,4,opt,name=caCertificate"` + + // destinationCACertificate provides the contents of the ca certificate of the final destination. When using reencrypt + // termination this file should be provided in order to have routers use it for health checks on the secure connection. + // If this field is not specified, the router may provide its own destination CA and perform hostname validation using + // the short service name (service.namespace.svc), which allows infrastructure generated certificates to automatically + // verify. + DestinationCACertificate string `json:"destinationCACertificate,omitempty" protobuf:"bytes,5,opt,name=destinationCACertificate"` + + // insecureEdgeTerminationPolicy indicates the desired behavior for insecure connections to a route. While + // each router may make its own decisions on which ports to expose, this is normally port 80. + // + // * Allow - traffic is sent to the server on the insecure port (edge/reencrypt terminations only) (default). + // * None - no traffic is allowed on the insecure port. + // * Redirect - clients are redirected to the secure port. + // + // +kubebuilder:validation:Enum=Allow;None;Redirect;"" + InsecureEdgeTerminationPolicy InsecureEdgeTerminationPolicyType `json:"insecureEdgeTerminationPolicy,omitempty" protobuf:"bytes,6,opt,name=insecureEdgeTerminationPolicy,casttype=InsecureEdgeTerminationPolicyType"` + + // externalCertificate provides certificate contents as a secret reference. 
+ // This should be a single serving certificate, not a certificate + // chain. Do not include a CA certificate. The secret referenced should + // be present in the same namespace as that of the Route. + // Forbidden when `certificate` is set. + // + // +openshift:enable:FeatureSets=CustomNoUpgrade;TechPreviewNoUpgrade + // +optional + ExternalCertificate *LocalObjectReference `json:"externalCertificate,omitempty" protobuf:"bytes,7,opt,name=externalCertificate"` +} + +// LocalObjectReference contains enough information to let you locate the +// referenced object inside the same namespace. +// +structType=atomic +type LocalObjectReference struct { + // name of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` +} + +// TLSTerminationType dictates where the secure communication will stop +// TODO: Reconsider this type in v2 +type TLSTerminationType string + +// InsecureEdgeTerminationPolicyType dictates the behavior of insecure +// connections to an edge-terminated route. +type InsecureEdgeTerminationPolicyType string + +const ( + // TLSTerminationEdge terminates encryption at the edge router. + TLSTerminationEdge TLSTerminationType = "edge" + // TLSTerminationPassthrough terminates encryption at the destination; the destination is responsible for decrypting traffic + TLSTerminationPassthrough TLSTerminationType = "passthrough" + // TLSTerminationReencrypt terminates encryption at the edge router and re-encrypts it with a new certificate supplied by the destination + TLSTerminationReencrypt TLSTerminationType = "reencrypt" + + // InsecureEdgeTerminationPolicyNone disables insecure connections for an edge-terminated route. + InsecureEdgeTerminationPolicyNone InsecureEdgeTerminationPolicyType = "None" + // InsecureEdgeTerminationPolicyAllow allows insecure connections for an edge-terminated route. + InsecureEdgeTerminationPolicyAllow InsecureEdgeTerminationPolicyType = "Allow" + // InsecureEdgeTerminationPolicyRedirect redirects insecure connections for an edge-terminated route. + // As an example, for routers that support HTTP and HTTPS, the + // insecure HTTP connections will be redirected to use HTTPS. + InsecureEdgeTerminationPolicyRedirect InsecureEdgeTerminationPolicyType = "Redirect" +) + +// WildcardPolicyType indicates the type of wildcard support needed by routes. +type WildcardPolicyType string + +const ( + // WildcardPolicyNone indicates no wildcard support is needed. + WildcardPolicyNone WildcardPolicyType = "None" + + // WildcardPolicySubdomain indicates the host needs wildcard support for the subdomain. + // Example: For host = "www.acme.test", indicates that the router + // should support requests for *.acme.test + // Note that this will not match acme.test, only *.acme.test + WildcardPolicySubdomain WildcardPolicyType = "Subdomain" +) + +// Route Annotations +const ( + // AllowNonDNSCompliantHostAnnotation indicates that the host name in a route + // configuration is not required to follow strict DNS compliance. + // Unless the annotation is set to true, the route host name must have at least one label. + // Labels must have no more than 63 characters from the set of + // alphanumeric characters, '-' or '.', and must start and end with an alphanumeric + // character. A trailing dot is not allowed. The total host name length must be no more + // than 253 characters.
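+	//
+	// --- Editor's illustrative sketch, not part of the vendored upstream file:
+	// combining the TLS types defined above: edge termination with insecure
+	// HTTP redirected to the secure port. The certificate and key contents are
+	// placeholders, not working material.
+	//
+	//	tls := &TLSConfig{
+	//		Termination:                   TLSTerminationEdge,
+	//		InsecureEdgeTerminationPolicy: InsecureEdgeTerminationPolicyRedirect,
+	//		Certificate:                   "-----BEGIN CERTIFICATE-----\n...",
+	//		Key:                           "-----BEGIN RSA PRIVATE KEY-----\n...",
+	//	}
+	//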
+ // + // When the annotation is set to true, the host name must pass a smaller set of + // requirements, i.e.: character set as described above, and total host name + // length must be no more than 253 characters. + // + // NOTE: use of this annotation may validate routes that cannot be admitted and will + // not function. The annotation is provided to allow a custom scenario, e.g. a custom + // ingress controller that relies on the route API, but for some customized purpose + // needs to use routes with invalid hosts. + AllowNonDNSCompliantHostAnnotation = "route.openshift.io/allow-non-dns-compliant-host" +) + +// Ingress-to-route controller +const ( + // IngressToRouteIngressClassControllerName is the name of the + // controller that translates ingresses into routes. This value is + // intended to be used for the spec.controller field of ingressclasses. + IngressToRouteIngressClassControllerName = "openshift.io/ingress-to-route" +) diff --git a/vendor/github.com/openshift/api/route/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/route/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..23a2edd42 --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/zz_generated.deepcopy.go @@ -0,0 +1,368 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalObjectReference. +func (in *LocalObjectReference) DeepCopy() *LocalObjectReference { + if in == nil { + return nil + } + out := new(LocalObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Route) DeepCopyInto(out *Route) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Route. +func (in *Route) DeepCopy() *Route { + if in == nil { + return nil + } + out := new(Route) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Route) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteHTTPHeader) DeepCopyInto(out *RouteHTTPHeader) { + *out = *in + in.Action.DeepCopyInto(&out.Action) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteHTTPHeader. +func (in *RouteHTTPHeader) DeepCopy() *RouteHTTPHeader { + if in == nil { + return nil + } + out := new(RouteHTTPHeader) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
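+
+// --- Editor's illustrative sketch, not part of the generated code: the
+// generated helpers let callers mutate a copy without aliasing the original,
+// for example an object read from a shared informer cache. exampleMutateCopy
+// is a hypothetical helper, not an upstream function.
+func exampleMutateCopy(orig *Route) *Route {
+	r := orig.DeepCopy()
+	// Pointer-typed fields such as Spec.TLS were deep-copied above, so this
+	// write does not touch orig.
+	r.Spec.TLS = &TLSConfig{Termination: TLSTerminationEdge}
+	return r
+}
+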
+func (in *RouteHTTPHeaderActionUnion) DeepCopyInto(out *RouteHTTPHeaderActionUnion) { + *out = *in + if in.Set != nil { + in, out := &in.Set, &out.Set + *out = new(RouteSetHTTPHeader) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteHTTPHeaderActionUnion. +func (in *RouteHTTPHeaderActionUnion) DeepCopy() *RouteHTTPHeaderActionUnion { + if in == nil { + return nil + } + out := new(RouteHTTPHeaderActionUnion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteHTTPHeaderActions) DeepCopyInto(out *RouteHTTPHeaderActions) { + *out = *in + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = make([]RouteHTTPHeader, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = make([]RouteHTTPHeader, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteHTTPHeaderActions. +func (in *RouteHTTPHeaderActions) DeepCopy() *RouteHTTPHeaderActions { + if in == nil { + return nil + } + out := new(RouteHTTPHeaderActions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteHTTPHeaders) DeepCopyInto(out *RouteHTTPHeaders) { + *out = *in + in.Actions.DeepCopyInto(&out.Actions) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteHTTPHeaders. +func (in *RouteHTTPHeaders) DeepCopy() *RouteHTTPHeaders { + if in == nil { + return nil + } + out := new(RouteHTTPHeaders) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteIngress) DeepCopyInto(out *RouteIngress) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]RouteIngressCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteIngress. +func (in *RouteIngress) DeepCopy() *RouteIngress { + if in == nil { + return nil + } + out := new(RouteIngress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteIngressCondition) DeepCopyInto(out *RouteIngressCondition) { + *out = *in + if in.LastTransitionTime != nil { + in, out := &in.LastTransitionTime, &out.LastTransitionTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteIngressCondition. +func (in *RouteIngressCondition) DeepCopy() *RouteIngressCondition { + if in == nil { + return nil + } + out := new(RouteIngressCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RouteList) DeepCopyInto(out *RouteList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Route, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteList. +func (in *RouteList) DeepCopy() *RouteList { + if in == nil { + return nil + } + out := new(RouteList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RouteList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoutePort) DeepCopyInto(out *RoutePort) { + *out = *in + out.TargetPort = in.TargetPort + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoutePort. +func (in *RoutePort) DeepCopy() *RoutePort { + if in == nil { + return nil + } + out := new(RoutePort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteSetHTTPHeader) DeepCopyInto(out *RouteSetHTTPHeader) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteSetHTTPHeader. +func (in *RouteSetHTTPHeader) DeepCopy() *RouteSetHTTPHeader { + if in == nil { + return nil + } + out := new(RouteSetHTTPHeader) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteSpec) DeepCopyInto(out *RouteSpec) { + *out = *in + in.To.DeepCopyInto(&out.To) + if in.AlternateBackends != nil { + in, out := &in.AlternateBackends, &out.AlternateBackends + *out = make([]RouteTargetReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(RoutePort) + **out = **in + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = new(TLSConfig) + (*in).DeepCopyInto(*out) + } + if in.HTTPHeaders != nil { + in, out := &in.HTTPHeaders, &out.HTTPHeaders + *out = new(RouteHTTPHeaders) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteSpec. +func (in *RouteSpec) DeepCopy() *RouteSpec { + if in == nil { + return nil + } + out := new(RouteSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouteStatus) DeepCopyInto(out *RouteStatus) { + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]RouteIngress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteStatus. +func (in *RouteStatus) DeepCopy() *RouteStatus { + if in == nil { + return nil + } + out := new(RouteStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RouteTargetReference) DeepCopyInto(out *RouteTargetReference) { + *out = *in + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouteTargetReference. +func (in *RouteTargetReference) DeepCopy() *RouteTargetReference { + if in == nil { + return nil + } + out := new(RouteTargetReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RouterShard) DeepCopyInto(out *RouterShard) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RouterShard. +func (in *RouterShard) DeepCopy() *RouterShard { + if in == nil { + return nil + } + out := new(RouterShard) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSConfig) DeepCopyInto(out *TLSConfig) { + *out = *in + if in.ExternalCertificate != nil { + in, out := &in.ExternalCertificate, &out.ExternalCertificate + *out = new(LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSConfig. +func (in *TLSConfig) DeepCopy() *TLSConfig { + if in == nil { + return nil + } + out := new(TLSConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..8d4958717 --- /dev/null +++ b/vendor/github.com/openshift/api/route/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,189 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_LocalObjectReference = map[string]string{ + "": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", + "name": "name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", +} + +func (LocalObjectReference) SwaggerDoc() map[string]string { + return map_LocalObjectReference +} + +var map_Route = map[string]string{ + "": "A route allows developers to expose services through an HTTP(S) aware load balancing and proxy layer via a public DNS entry. The route may further specify TLS options and a certificate, or specify a public CNAME that the router should also accept for HTTP and HTTPS traffic. An administrator typically configures their router to be visible outside the cluster firewall, and may also add additional security, caching, or traffic controls on the service content. Routers usually talk directly to the service endpoints.\n\nOnce a route is created, the `host` field may not be changed. 
Generally, routers use the oldest route with a given host when resolving conflicts.\n\nRouters are subject to additional customization and may support additional controls via the annotations field.\n\nBecause administrators may configure multiple routers, the route status field is used to return information to clients about the names and states of the route under each router. If a client chooses a duplicate name, for instance, the route status conditions are used to indicate the route cannot be chosen.\n\nEnabling HTTP/2 ALPN on a route requires a custom (non-wildcard) certificate. This prevents connection coalescing by clients, notably web browsers. We do not support HTTP/2 ALPN on routes that use the default certificate because of the risk of connection re-use/coalescing. Routes that do not have their own custom certificate will not be HTTP/2 ALPN-enabled on either the frontend or the backend.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the desired state of the route", + "status": "status is the current state of the route", +} + +func (Route) SwaggerDoc() map[string]string { + return map_Route +} + +var map_RouteHTTPHeader = map[string]string{ + "": "RouteHTTPHeader specifies configuration for setting or deleting an HTTP header.", + "name": "name specifies the name of a header on which to perform an action. Its value must be a valid HTTP header name as defined in RFC 2616 section 4.2. The name must consist only of alphanumeric and the following special characters, \"-!#$%&'*+.^_`\". The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Cookie, Set-Cookie. It must be no more than 255 characters in length. Header name must be unique.", + "action": "action specifies actions to perform on headers, such as setting or deleting headers.", +} + +func (RouteHTTPHeader) SwaggerDoc() map[string]string { + return map_RouteHTTPHeader +} + +var map_RouteHTTPHeaderActionUnion = map[string]string{ + "": "RouteHTTPHeaderActionUnion specifies an action to take on an HTTP header.", + "type": "type defines the type of the action to be applied on the header. Possible values are Set or Delete. Set allows you to set HTTP request and response headers. Delete allows you to delete HTTP request and response headers.", + "set": "set defines the HTTP header that should be set: added if it doesn't exist or replaced if it does. This field is required when type is Set and forbidden otherwise.", +} + +func (RouteHTTPHeaderActionUnion) SwaggerDoc() map[string]string { + return map_RouteHTTPHeaderActionUnion +} + +var map_RouteHTTPHeaderActions = map[string]string{ + "": "RouteHTTPHeaderActions defines configuration for actions on HTTP request and response headers.", + "response": "response is a list of HTTP response headers to modify. Currently, actions may either `Set` or `Delete` header values. Actions defined here will modify the response headers of all requests made through a route. These actions are applied to a specific Route defined within a cluster i.e. connections made through a route. Route actions will be executed before IngressController actions for response headers. Actions are applied in sequence as defined in this list.
A maximum of 20 response header actions may be configured. You can use this field to specify HTTP response headers that should be set or deleted when forwarding responses from your application to the client. Sample fetchers allowed are \"res.hdr\" and \"ssl_c_der\". Converters allowed are \"lower\" and \"base64\". Example header values: \"%[res.hdr(X-target),lower]\", \"%{+Q}[ssl_c_der,base64]\". Note: This field cannot be used if your route uses TLS passthrough.", + "request": "request is a list of HTTP request headers to modify. Currently, actions may either `Set` or `Delete` header values. Actions defined here will modify the request headers of all requests made through a route. These actions are applied to a specific Route defined within a cluster i.e. connections made through a route. Route actions will be executed after IngressController actions for request headers. Actions are applied in sequence as defined in this list. A maximum of 20 request header actions may be configured. You can use this field to specify HTTP request headers that should be set or deleted when forwarding connections from the client to your application. Sample fetchers allowed are \"req.hdr\" and \"ssl_c_der\". Converters allowed are \"lower\" and \"base64\". Example header values: \"%[req.hdr(X-target),lower]\", \"%{+Q}[ssl_c_der,base64]\". Any request header configuration applied directly via a Route resource using this API will override header configuration for a header of the same name applied via spec.httpHeaders.actions on the IngressController or route annotation. Note: This field cannot be used if your route uses TLS passthrough.", +} + +func (RouteHTTPHeaderActions) SwaggerDoc() map[string]string { + return map_RouteHTTPHeaderActions +} + +var map_RouteHTTPHeaders = map[string]string{ + "": "RouteHTTPHeaders defines policy for HTTP headers.", + "actions": "actions specifies options for modifying headers and their values. Note that this option only applies to cleartext HTTP connections and to secure HTTP connections for which the ingress controller terminates encryption (that is, edge-terminated or reencrypt connections). Headers cannot be modified for TLS passthrough connections. Setting the HSTS (`Strict-Transport-Security`) header is not supported via actions. `Strict-Transport-Security` may only be configured using the \"haproxy.router.openshift.io/hsts_header\" route annotation, and only in accordance with the policy specified in Ingress.Spec.RequiredHSTSPolicies. In case of HTTP request headers, the actions specified in spec.httpHeaders.actions on the Route will be executed after the actions specified in the IngressController's spec.httpHeaders.actions field. In case of HTTP response headers, the actions specified in spec.httpHeaders.actions on the IngressController will be executed after the actions specified in the Route's spec.httpHeaders.actions field. The headers set via this API will not appear in access logs. Any actions defined here are applied after any actions related to the following other fields: cache-control, spec.clientTLS, spec.httpHeaders.forwardedHeaderPolicy, spec.httpHeaders.uniqueId, and spec.httpHeaders.headerNameCaseAdjustments. The following header names are reserved and may not be modified via this API: Strict-Transport-Security, Proxy, Cookie, Set-Cookie.
Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. Please refer to the documentation for that API field for more details.", +} + +func (RouteHTTPHeaders) SwaggerDoc() map[string]string { + return map_RouteHTTPHeaders +} + +var map_RouteIngress = map[string]string{ + "": "RouteIngress holds information about the places where a route is exposed.", + "host": "Host is the host string under which the route is exposed; this value is required", + "routerName": "Name is a name chosen by the router to identify itself; this value is required", + "conditions": "Conditions is the state of the route, may be empty.", + "wildcardPolicy": "Wildcard policy is the wildcard policy that was allowed where this route is exposed.", + "routerCanonicalHostname": "CanonicalHostname is the external host name for the router that can be used as a CNAME for the host requested for this route. This value is optional and may not be set in all cases.", +} + +func (RouteIngress) SwaggerDoc() map[string]string { + return map_RouteIngress +} + +var map_RouteIngressCondition = map[string]string{ + "": "RouteIngressCondition contains details for the current condition of this route on a particular router.", + "type": "Type is the type of the condition. Currently only Admitted.", + "status": "Status is the status of the condition. Can be True, False, Unknown.", + "reason": "(brief) reason for the condition's last transition, and is usually a machine and human readable constant", + "message": "Human readable message indicating details about last transition.", + "lastTransitionTime": "RFC 3339 date and time when this condition last transitioned", +} + +func (RouteIngressCondition) SwaggerDoc() map[string]string { + return map_RouteIngressCondition +} + +var map_RouteList = map[string]string{ + "": "RouteList is a collection of Routes.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of routes", +} + +func (RouteList) SwaggerDoc() map[string]string { + return map_RouteList +} + +var map_RoutePort = map[string]string{ + "": "RoutePort defines a port mapping from a router to an endpoint in the service endpoints.", + "targetPort": "The target port on pods selected by the service this route points to. If this is a string, it will be looked up as a named port in the target endpoints port list. Required", +} + +func (RoutePort) SwaggerDoc() map[string]string { + return map_RoutePort +} + +var map_RouteSetHTTPHeader = map[string]string{ + "": "RouteSetHTTPHeader specifies what value needs to be set on an HTTP header.", + "value": "value specifies a header value. Dynamic values can be added. The value will be interpreted as an HAProxy format string as defined in http://cbonte.github.io/haproxy-dconv/2.6/configuration.html#8.2.6 and may use HAProxy's %[] syntax and otherwise must be a valid HTTP header value as defined in https://datatracker.ietf.org/doc/html/rfc7230#section-3.2. The value of this field must be no more than 16384 characters in length. Note that the total size of all net added headers *after* interpolating dynamic values must not exceed the value of spec.tuningOptions.headerBufferMaxRewriteBytes on the IngressController. 
", +} + +func (RouteSetHTTPHeader) SwaggerDoc() map[string]string { + return map_RouteSetHTTPHeader +} + +var map_RouteSpec = map[string]string{ + "": "RouteSpec describes the hostname or path the route exposes, any security information, and one to four backends (services) the route points to. Requests are distributed among the backends depending on the weights assigned to each backend. When using roundrobin scheduling the portion of requests that go to each backend is the backend weight divided by the sum of all of the backend weights. When the backend has more than one endpoint the requests that end up on the backend are roundrobin distributed among the endpoints. Weights are between 0 and 256 with default 100. Weight 0 causes no requests to the backend. If all weights are zero the route will be considered to have no backends and return a standard 503 response.\n\nThe `tls` field is optional and allows specific certificates or behavior for the route. Routers typically configure a default certificate on a wildcard domain to terminate routes without explicit certificates, but custom hostnames usually must choose passthrough (send traffic directly to the backend via the TLS Server-Name- Indication field) or provide a certificate.", + "host": "host is an alias/DNS that points to the service. Optional. If not specified a route name will typically be automatically chosen. Must follow DNS952 subdomain conventions.", + "subdomain": "subdomain is a DNS subdomain that is requested within the ingress controller's domain (as a subdomain). If host is set this field is ignored. An ingress controller may choose to ignore this suggested name, in which case the controller will report the assigned name in the status.ingress array or refuse to admit the route. If this value is set and the server does not support this field host will be populated automatically. Otherwise host is left empty. The field may have multiple parts separated by a dot, but not all ingress controllers may honor the request. This field may not be changed after creation except by a user with the update routes/custom-host permission.\n\nExample: subdomain `frontend` automatically receives the router subdomain `apps.mycluster.com` to have a full hostname `frontend.apps.mycluster.com`.", + "path": "path that the router watches for, to route traffic for to the service. Optional", + "to": "to is an object the route should use as the primary backend. Only the Service kind is allowed, and it will be defaulted to Service. If the weight field (0-256 default 100) is set to zero, no traffic will be sent to this backend.", + "alternateBackends": "alternateBackends allows up to 3 additional backends to be assigned to the route. Only the Service kind is allowed, and it will be defaulted to Service. Use the weight field in RouteTargetReference object to specify relative preference.", + "port": "If specified, the port to be used by the router. Most routers will use all endpoints exposed by the service by default - set this value to instruct routers which port to use.", + "tls": "The tls field provides the ability to configure certificates and termination for the route.", + "wildcardPolicy": "Wildcard policy if any for the route. 
Currently only 'Subdomain' or 'None' is allowed.", + "httpHeaders": "httpHeaders defines policy for HTTP headers.", +} + +func (RouteSpec) SwaggerDoc() map[string]string { + return map_RouteSpec +} + +var map_RouteStatus = map[string]string{ + "": "RouteStatus provides relevant info about the status of a route, including which routers acknowledge it.", + "ingress": "ingress describes the places where the route may be exposed. The list of ingress points may contain duplicate Host or RouterName values. Routes are considered live once they are `Ready`", +} + +func (RouteStatus) SwaggerDoc() map[string]string { + return map_RouteStatus +} + +var map_RouteTargetReference = map[string]string{ + "": "RouteTargetReference specifies the target that resolves into endpoints. Only the 'Service' kind is allowed. Use the 'weight' field to emphasize one over others.", + "kind": "The kind of target that the route is referring to. Currently, only 'Service' is allowed", + "name": "name of the service/target that is being referred to. e.g. name of the service", + "weight": "weight as an integer between 0 and 256, default 100, that specifies the target's relative weight against other target reference objects. 0 suppresses requests to this backend.", +} + +func (RouteTargetReference) SwaggerDoc() map[string]string { + return map_RouteTargetReference +} + +var map_RouterShard = map[string]string{ + "": "RouterShard has information of a routing shard and is used to generate host names and routing table entries when a routing shard is allocated for a specific route. Caveat: This is WIP and will likely undergo modifications when sharding support is added.", + "shardName": "shardName uniquely identifies a router shard in the \"set\" of routers used for routing traffic to the services.", + "dnsSuffix": "dnsSuffix for the shard ala: shard-1.v3.openshift.com", +} + +func (RouterShard) SwaggerDoc() map[string]string { + return map_RouterShard +} + +var map_TLSConfig = map[string]string{ + "": "TLSConfig defines config used to secure a route and provide termination", + "termination": "termination indicates termination type.\n\n* edge - TLS termination is done by the router and http is used to communicate with the backend (default) * passthrough - Traffic is sent straight to the destination without the router providing TLS termination * reencrypt - TLS termination is done by the router and https is used to communicate with the backend\n\nNote: passthrough termination is incompatible with httpHeader actions", + "certificate": "certificate provides certificate contents. This should be a single serving certificate, not a certificate chain. Do not include a CA certificate.", + "key": "key provides key file contents", + "caCertificate": "caCertificate provides the cert authority certificate contents", + "destinationCACertificate": "destinationCACertificate provides the contents of the ca certificate of the final destination. When using reencrypt termination this file should be provided in order to have routers use it for health checks on the secure connection. If this field is not specified, the router may provide its own destination CA and perform hostname validation using the short service name (service.namespace.svc), which allows infrastructure generated certificates to automatically verify.", + "insecureEdgeTerminationPolicy": "insecureEdgeTerminationPolicy indicates the desired behavior for insecure connections to a route.
While each router may make its own decisions on which ports to expose, this is normally port 80.\n\n* Allow - traffic is sent to the server on the insecure port (edge/reencrypt terminations only) (default). * None - no traffic is allowed on the insecure port. * Redirect - clients are redirected to the secure port.", + "externalCertificate": "externalCertificate provides certificate contents as a secret reference. This should be a single serving certificate, not a certificate chain. Do not include a CA certificate. The secret referenced should be present in the same namespace as that of the Route. Forbidden when `certificate` is set.", +} + +func (TLSConfig) SwaggerDoc() map[string]string { + return map_TLSConfig +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/samples/.codegen.yaml b/vendor/github.com/openshift/api/samples/.codegen.yaml new file mode 100644 index 000000000..ffa2c8d9b --- /dev/null +++ b/vendor/github.com/openshift/api/samples/.codegen.yaml @@ -0,0 +1,2 @@ +swaggerdocs: + commentPolicy: Warn diff --git a/vendor/github.com/openshift/api/samples/install.go b/vendor/github.com/openshift/api/samples/install.go new file mode 100644 index 000000000..8ad4d8197 --- /dev/null +++ b/vendor/github.com/openshift/api/samples/install.go @@ -0,0 +1,26 @@ +package samples + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + samplesv1 "github.com/openshift/api/samples/v1" +) + +const ( + GroupName = "samples.operator.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(samplesv1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/samples/v1/00_samplesconfig.crd.yaml b/vendor/github.com/openshift/api/samples/v1/00_samplesconfig.crd.yaml new file mode 100644 index 000000000..c55f98417 --- /dev/null +++ b/vendor/github.com/openshift/api/samples/v1/00_samplesconfig.crd.yaml @@ -0,0 +1,127 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/513 + description: Extension for configuring openshift samples operator. + displayName: ConfigsSamples + include.release.openshift.io/ibm-cloud-managed: "true" + include.release.openshift.io/self-managed-high-availability: "true" + include.release.openshift.io/single-node-developer: "true" + name: configs.samples.operator.openshift.io +spec: + group: samples.operator.openshift.io + names: + kind: Config + listKind: ConfigList + plural: configs + singular: config + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: "Config contains the configuration and detailed condition status for the Samples Operator. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)." + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: ConfigSpec contains the desired configuration and state for the Samples Operator, controlling various behavior around the imagestreams and templates it creates/updates in the openshift namespace. + type: object + properties: + architectures: + description: architectures determine which hardware architecture(s) to install, where x86_64, ppc64le, and s390x are the only supported choices currently. + type: array + items: + type: string + managementState: + description: managementState is top level on/off type of switch for all operators. When "Managed", this operator processes config and manipulates the samples accordingly. When "Unmanaged", this operator ignores any updates to the resources it watches. When "Removed", it reacts the same way as it does if the Config object is deleted, meaning any ImageStreams or Templates it manages (i.e. it honors the skipped lists) and the registry secret are deleted, along with the ConfigMap in the operator's namespace that represents the last config used to manipulate the samples. + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + samplesRegistry: + description: samplesRegistry allows for the specification of which registry is accessed by the ImageStreams for their image content. Defaults on the content in https://github.com/openshift/library that are pulled into this github repository, but based on our pulling only ocp content it typically defaults to registry.redhat.io. + type: string + skippedImagestreams: + description: skippedImagestreams specifies names of image streams that should NOT be created/updated. Admins can use this to allow them to delete content they don’t want. They will still have to manually delete the content but the operator will not recreate(or update) anything listed here. + type: array + items: + type: string + skippedTemplates: + description: skippedTemplates specifies names of templates that should NOT be created/updated. Admins can use this to allow them to delete content they don’t want. They will still have to manually delete the content but the operator will not recreate(or update) anything listed here. + type: array + items: + type: string + status: + description: ConfigStatus contains the actual configuration in effect, as well as various details that describe the state of the Samples Operator. + type: object + properties: + architectures: + description: architectures determine which hardware architecture(s) to install, where x86_64 and ppc64le are the supported choices. + type: array + items: + type: string + conditions: + description: conditions represents the available maintenance status of the sample imagestreams and templates. + type: array + items: + description: ConfigCondition captures various conditions of the Config as entries are processed. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another.
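+                      # --- Editor's illustrative sketch, not part of the CRD: a minimal
+                      # Config object this schema would accept; the name and the skipped
+                      # imagestream are hypothetical example values.
+                      #
+                      #   apiVersion: samples.operator.openshift.io/v1
+                      #   kind: Config
+                      #   metadata:
+                      #     name: cluster
+                      #   spec:
+                      #     managementState: Managed
+                      #     skippedImagestreams:
+                      #       - jenkins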
+ type: string + format: date-time + lastUpdateTime: + description: lastUpdateTime is the last time this condition was updated. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. + type: string + reason: + description: reason is what caused the condition's last transition. + type: string + status: + description: status of the condition, one of True, False, Unknown. + type: string + type: + description: type of condition. + type: string + managementState: + description: managementState reflects the current operational status of the on/off switch for the operator. This operator compares the ManagementState as part of determining that we are turning the operator back on (i.e. "Managed") when it was previously "Unmanaged". + type: string + pattern: ^(Managed|Unmanaged|Force|Removed)$ + samplesRegistry: + description: samplesRegistry allows for the specification of which registry is accessed by the ImageStreams for their image content. Defaults on the content in https://github.com/openshift/library that are pulled into this github repository, but based on our pulling only ocp content it typically defaults to registry.redhat.io. + type: string + skippedImagestreams: + description: skippedImagestreams specifies names of image streams that should NOT be created/updated. Admins can use this to allow them to delete content they don’t want. They will still have to manually delete the content but the operator will not recreate(or update) anything listed here. + type: array + items: + type: string + skippedTemplates: + description: skippedTemplates specifies names of templates that should NOT be created/updated. Admins can use this to allow them to delete content they don’t want. They will still have to manually delete the content but the operator will not recreate(or update) anything listed here. + type: array + items: + type: string + version: + description: version is the value of the operator's payload based version indicator when it was last successfully processed + type: string + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/samples/v1/Makefile b/vendor/github.com/openshift/api/samples/v1/Makefile new file mode 100644 index 000000000..be24ecca0 --- /dev/null +++ b/vendor/github.com/openshift/api/samples/v1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="samples.operator.openshift.io/v1" diff --git a/vendor/github.com/openshift/api/samples/v1/doc.go b/vendor/github.com/openshift/api/samples/v1/doc.go new file mode 100644 index 000000000..d63c96b77 --- /dev/null +++ b/vendor/github.com/openshift/api/samples/v1/doc.go @@ -0,0 +1,7 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=samples.operator.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/samples/v1/generated.pb.go b/vendor/github.com/openshift/api/samples/v1/generated.pb.go new file mode 100644 index 000000000..d2f1c4403 --- /dev/null +++ b/vendor/github.com/openshift/api/samples/v1/generated.pb.go @@ -0,0 +1,1847 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/openshift/api/samples/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_openshift_api_operator_v1 "github.com/openshift/api/operator/v1" + + k8s_io_api_core_v1 "k8s.io/api/core/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *Config) Reset() { *m = Config{} } +func (*Config) ProtoMessage() {} +func (*Config) Descriptor() ([]byte, []int) { + return fileDescriptor_67d62912ac03ce1e, []int{0} +} +func (m *Config) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Config) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Config) XXX_Merge(src proto.Message) { + xxx_messageInfo_Config.Merge(m, src) +} +func (m *Config) XXX_Size() int { + return m.Size() +} +func (m *Config) XXX_DiscardUnknown() { + xxx_messageInfo_Config.DiscardUnknown(m) +} + +var xxx_messageInfo_Config proto.InternalMessageInfo + +func (m *ConfigCondition) Reset() { *m = ConfigCondition{} } +func (*ConfigCondition) ProtoMessage() {} +func (*ConfigCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_67d62912ac03ce1e, []int{1} +} +func (m *ConfigCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigCondition.Merge(m, src) +} +func (m *ConfigCondition) XXX_Size() int { + return m.Size() +} +func (m *ConfigCondition) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigCondition proto.InternalMessageInfo + +func (m *ConfigList) Reset() { *m = ConfigList{} } +func (*ConfigList) ProtoMessage() {} +func (*ConfigList) Descriptor() ([]byte, []int) { + return fileDescriptor_67d62912ac03ce1e, []int{2} +} +func (m *ConfigList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigList.Merge(m, src) +} +func (m *ConfigList) XXX_Size() int { + return m.Size() +} +func (m *ConfigList) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigList.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigList proto.InternalMessageInfo + +func (m *ConfigSpec) Reset() { *m = ConfigSpec{} } +func (*ConfigSpec) ProtoMessage() {} +func (*ConfigSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_67d62912ac03ce1e, []int{3} +} +func (m *ConfigSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigSpec) XXX_Marshal(b []byte, deterministic 
bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigSpec.Merge(m, src) +} +func (m *ConfigSpec) XXX_Size() int { + return m.Size() +} +func (m *ConfigSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigSpec proto.InternalMessageInfo + +func (m *ConfigStatus) Reset() { *m = ConfigStatus{} } +func (*ConfigStatus) ProtoMessage() {} +func (*ConfigStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_67d62912ac03ce1e, []int{4} +} +func (m *ConfigStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigStatus.Merge(m, src) +} +func (m *ConfigStatus) XXX_Size() int { + return m.Size() +} +func (m *ConfigStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigStatus proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Config)(nil), "github.com.openshift.api.samples.v1.Config") + proto.RegisterType((*ConfigCondition)(nil), "github.com.openshift.api.samples.v1.ConfigCondition") + proto.RegisterType((*ConfigList)(nil), "github.com.openshift.api.samples.v1.ConfigList") + proto.RegisterType((*ConfigSpec)(nil), "github.com.openshift.api.samples.v1.ConfigSpec") + proto.RegisterType((*ConfigStatus)(nil), "github.com.openshift.api.samples.v1.ConfigStatus") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/samples/v1/generated.proto", fileDescriptor_67d62912ac03ce1e) +} + +var fileDescriptor_67d62912ac03ce1e = []byte{ + // 804 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x56, 0xcd, 0x6e, 0xe4, 0x44, + 0x10, 0x1e, 0x67, 0x7e, 0x92, 0xed, 0xdd, 0x64, 0x42, 0x07, 0xb1, 0x56, 0x0e, 0xf6, 0x6a, 0x22, + 0xa1, 0x00, 0xa2, 0x4d, 0x96, 0x88, 0xe5, 0xc8, 0x7a, 0x4f, 0x91, 0x12, 0x2d, 0x74, 0x06, 0x24, + 0x10, 0x07, 0x3a, 0x9e, 0x8a, 0xa7, 0x77, 0x62, 0xbb, 0xe5, 0xee, 0x19, 0x69, 0x6e, 0x3c, 0xc2, + 0x1e, 0x79, 0x03, 0x5e, 0x82, 0x07, 0xc8, 0x8d, 0x3d, 0xee, 0xc9, 0x22, 0xe6, 0x2d, 0x72, 0x42, + 0xdd, 0xb6, 0xe7, 0x7f, 0xc5, 0x8c, 0x16, 0x89, 0xdb, 0x74, 0x55, 0x7d, 0x5f, 0x55, 0xf7, 0xf7, + 0x4d, 0xc9, 0xe8, 0xcb, 0x90, 0xab, 0xfe, 0xf0, 0x8a, 0x04, 0x49, 0xe4, 0x25, 0x02, 0x62, 0xd9, + 0xe7, 0xd7, 0xca, 0x63, 0x82, 0x7b, 0x92, 0x45, 0xe2, 0x06, 0xa4, 0x37, 0x3a, 0xf1, 0x42, 0x88, + 0x21, 0x65, 0x0a, 0x7a, 0x44, 0xa4, 0x89, 0x4a, 0xf0, 0xd1, 0x14, 0x44, 0x26, 0x20, 0xc2, 0x04, + 0x27, 0x25, 0x88, 0x8c, 0x4e, 0x0e, 0x3f, 0x9f, 0x61, 0x0e, 0x93, 0x30, 0xf1, 0x0c, 0xf6, 0x6a, + 0x78, 0x6d, 0x4e, 0xe6, 0x60, 0x7e, 0x15, 0x9c, 0x87, 0x9d, 0xc1, 0xd7, 0x92, 0xf0, 0xc4, 0xb4, + 0x0e, 0x92, 0x14, 0x56, 0xf4, 0x3d, 0x3c, 0x9d, 0xd6, 0x44, 0x2c, 0xe8, 0xf3, 0x18, 0xd2, 0xb1, + 0x27, 0x06, 0xa1, 0x0e, 0x48, 0x2f, 0x02, 0xc5, 0x56, 0xa1, 0xbc, 0x77, 0xa1, 0xd2, 0x61, 0xac, + 0x78, 0x04, 0x4b, 0x80, 0xaf, 0xfe, 0x0d, 0x20, 0x83, 0x3e, 0x44, 0x6c, 0x11, 0xd7, 0xf9, 0x6d, + 0x0b, 0xb5, 0x5e, 0x24, 0xf1, 0x35, 0x0f, 0xf1, 0x2f, 0x68, 0x47, 0x8f, 0xd3, 0x63, 0x8a, 0xd9, + 0xd6, 0x13, 0xeb, 0xf8, 0xe1, 0xd3, 0x2f, 0x48, 0xc1, 0x4a, 0x66, 0x59, 0x89, 0x18, 0x84, 0x3a, + 0x20, 
0x89, 0xae, 0x26, 0xa3, 0x13, 0xf2, 0xf2, 0xea, 0x15, 0x04, 0xea, 0x02, 0x14, 0xf3, 0xf1, + 0x6d, 0xe6, 0xd6, 0xf2, 0xcc, 0x45, 0xd3, 0x18, 0x9d, 0xb0, 0xe2, 0xef, 0x50, 0x43, 0x0a, 0x08, + 0xec, 0x2d, 0xc3, 0xee, 0x91, 0x35, 0x24, 0x21, 0xc5, 0x70, 0x97, 0x02, 0x02, 0xff, 0x51, 0x49, + 0xde, 0xd0, 0x27, 0x6a, 0xa8, 0xf0, 0x8f, 0xa8, 0x25, 0x15, 0x53, 0x43, 0x69, 0xd7, 0x0d, 0xe9, + 0xc9, 0x26, 0xa4, 0x06, 0xe8, 0xef, 0x95, 0xb4, 0xad, 0xe2, 0x4c, 0x4b, 0xc2, 0xce, 0x9f, 0x75, + 0xd4, 0x2e, 0x0a, 0x5f, 0x24, 0x71, 0x8f, 0x2b, 0x9e, 0xc4, 0xf8, 0x19, 0x6a, 0xa8, 0xb1, 0x00, + 0xf3, 0x3e, 0x0f, 0xfc, 0xa3, 0x6a, 0xa0, 0xee, 0x58, 0xc0, 0x7d, 0xe6, 0x1e, 0x2c, 0x94, 0xeb, + 0x30, 0x35, 0x00, 0x7c, 0x3e, 0x99, 0x73, 0xcb, 0x40, 0x4f, 0xe7, 0x9b, 0xde, 0x67, 0xee, 0x0a, + 0x33, 0x91, 0x09, 0xd3, 0xfc, 0x68, 0xf8, 0x15, 0xda, 0xbb, 0x61, 0x52, 0x7d, 0x2f, 0x7a, 0x4c, + 0x41, 0x97, 0x47, 0x50, 0xde, 0xfe, 0xd3, 0xf5, 0x04, 0xd3, 0x08, 0xff, 0xa3, 0x72, 0x82, 0xbd, + 0xf3, 0x39, 0x26, 0xba, 0xc0, 0x8c, 0x47, 0x08, 0xeb, 0x48, 0x37, 0x65, 0xb1, 0x2c, 0x6e, 0xa5, + 0xfb, 0x35, 0x36, 0xee, 0x77, 0x58, 0xf6, 0xc3, 0xe7, 0x4b, 0x6c, 0x74, 0x45, 0x07, 0xfc, 0x31, + 0x6a, 0xa5, 0xc0, 0x64, 0x12, 0xdb, 0x4d, 0xf3, 0x62, 0x13, 0x99, 0xa8, 0x89, 0xd2, 0x32, 0x8b, + 0x3f, 0x41, 0xdb, 0x11, 0x48, 0xc9, 0x42, 0xb0, 0x5b, 0xa6, 0xb0, 0x5d, 0x16, 0x6e, 0x5f, 0x14, + 0x61, 0x5a, 0xe5, 0x3b, 0x7f, 0x58, 0x08, 0x15, 0x12, 0x9d, 0x73, 0xa9, 0xf0, 0xcf, 0x4b, 0x86, + 0x27, 0xeb, 0xdd, 0x47, 0xa3, 0x8d, 0xdd, 0xf7, 0xcb, 0x56, 0x3b, 0x55, 0x64, 0xc6, 0xec, 0xdf, + 0xa2, 0x26, 0x57, 0x10, 0x69, 0xc1, 0xeb, 0xc7, 0x0f, 0x9f, 0x7e, 0xb6, 0x81, 0x31, 0xfd, 0xdd, + 0x92, 0xb7, 0x79, 0xa6, 0x19, 0x68, 0x41, 0xd4, 0x79, 0x5d, 0xaf, 0xc6, 0xd7, 0x7f, 0x00, 0x3c, + 0x46, 0xed, 0x88, 0xc5, 0x2c, 0x84, 0x08, 0x62, 0xa5, 0x0d, 0x52, 0xd9, 0xf2, 0x65, 0x89, 0x6e, + 0x5f, 0xcc, 0xa7, 0xef, 0x33, 0xf7, 0xf4, 0x9d, 0xab, 0x33, 0x11, 0x7a, 0x2f, 0x24, 0xa9, 0xb6, + 0xdd, 0x02, 0x8e, 0x2e, 0xf6, 0xc1, 0xcf, 0x51, 0xbb, 0x1c, 0x9a, 0x42, 0xc8, 0xa5, 0x4a, 0xc7, + 0xa5, 0xad, 0x1f, 0x57, 0xad, 0x2f, 0xe7, 0xd3, 0x74, 0xb1, 0x1e, 0x3f, 0x43, 0xbb, 0x2c, 0x0d, + 0xfa, 0x5c, 0x41, 0xa0, 0x86, 0x29, 0x48, 0xbb, 0xf1, 0xa4, 0x7e, 0xfc, 0xc0, 0xff, 0x20, 0xcf, + 0xdc, 0xdd, 0xe7, 0xb3, 0x09, 0x3a, 0x5f, 0x87, 0xcf, 0xd0, 0x81, 0x1c, 0x70, 0x21, 0xa0, 0x77, + 0x16, 0xb1, 0x10, 0xa4, 0x4a, 0x81, 0x45, 0xd2, 0x6e, 0x1a, 0xf8, 0xe3, 0x3c, 0x73, 0x0f, 0x2e, + 0x97, 0xd3, 0x74, 0x15, 0x06, 0x7f, 0x83, 0xf6, 0xcb, 0x70, 0x17, 0x22, 0x71, 0xc3, 0x14, 0x48, + 0xbb, 0x65, 0x78, 0x3e, 0xcc, 0x33, 0x77, 0xff, 0x72, 0x21, 0x47, 0x97, 0xaa, 0x3b, 0xbf, 0x37, + 0xd0, 0xa3, 0xd9, 0x65, 0xf2, 0x7f, 0x8a, 0xd2, 0x47, 0x28, 0xa8, 0xf6, 0x45, 0xe5, 0xba, 0xd3, + 0x0d, 0x5c, 0x37, 0x59, 0x36, 0xd3, 0x2d, 0x3e, 0x09, 0x49, 0x3a, 0xc3, 0xbd, 0x4a, 0xfe, 0xfa, + 0xfb, 0xca, 0xdf, 0x7c, 0x3f, 0xf9, 0x5b, 0xff, 0x91, 0xfc, 0xdb, 0x9b, 0xc8, 0xaf, 0x77, 0xcf, + 0x08, 0x52, 0xc9, 0x93, 0xd8, 0xde, 0x99, 0xdf, 0x3d, 0x3f, 0x14, 0x61, 0x5a, 0xe5, 0xfd, 0xb3, + 0xdb, 0x3b, 0xa7, 0xf6, 0xe6, 0xce, 0xa9, 0xbd, 0xbd, 0x73, 0x6a, 0xbf, 0xe6, 0x8e, 0x75, 0x9b, + 0x3b, 0xd6, 0x9b, 0xdc, 0xb1, 0xde, 0xe6, 0x8e, 0xf5, 0x57, 0xee, 0x58, 0xaf, 0xff, 0x76, 0x6a, + 0x3f, 0x1d, 0xad, 0xf1, 0x69, 0xf3, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xd3, 0x0c, 0x8c, + 0x00, 0x09, 0x00, 0x00, +} + +func (m *Config) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *Config) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Config) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConfigCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConfigList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConfigSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + 
return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.SkippedTemplates) > 0 { + for iNdEx := len(m.SkippedTemplates) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SkippedTemplates[iNdEx]) + copy(dAtA[i:], m.SkippedTemplates[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SkippedTemplates[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.SkippedImagestreams) > 0 { + for iNdEx := len(m.SkippedImagestreams) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SkippedImagestreams[iNdEx]) + copy(dAtA[i:], m.SkippedImagestreams[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SkippedImagestreams[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Architectures) > 0 { + for iNdEx := len(m.Architectures) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Architectures[iNdEx]) + copy(dAtA[i:], m.Architectures[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Architectures[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.SamplesRegistry) + copy(dAtA[i:], m.SamplesRegistry) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SamplesRegistry))) + i-- + dAtA[i] = 0x12 + i -= len(m.ManagementState) + copy(dAtA[i:], m.ManagementState) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ManagementState))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConfigStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x42 + if len(m.SkippedTemplates) > 0 { + for iNdEx := len(m.SkippedTemplates) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SkippedTemplates[iNdEx]) + copy(dAtA[i:], m.SkippedTemplates[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SkippedTemplates[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if len(m.SkippedImagestreams) > 0 { + for iNdEx := len(m.SkippedImagestreams) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SkippedImagestreams[iNdEx]) + copy(dAtA[i:], m.SkippedImagestreams[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SkippedImagestreams[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Architectures) > 0 { + for iNdEx := len(m.Architectures) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Architectures[iNdEx]) + copy(dAtA[i:], m.Architectures[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Architectures[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + i -= len(m.SamplesRegistry) + copy(dAtA[i:], m.SamplesRegistry) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SamplesRegistry))) + i-- + dAtA[i] = 0x1a + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.ManagementState) + copy(dAtA[i:], m.ManagementState) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ManagementState))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, 
offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Config) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ConfigCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ConfigList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ConfigSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ManagementState) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SamplesRegistry) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Architectures) > 0 { + for _, s := range m.Architectures { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.SkippedImagestreams) > 0 { + for _, s := range m.SkippedImagestreams { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.SkippedTemplates) > 0 { + for _, s := range m.SkippedTemplates { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ConfigStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ManagementState) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.SamplesRegistry) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Architectures) > 0 { + for _, s := range m.Architectures { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.SkippedImagestreams) > 0 { + for _, s := range m.SkippedImagestreams { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.SkippedTemplates) > 0 { + for _, s := range m.SkippedTemplates { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Config) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Config{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ConfigSpec", "ConfigSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ConfigStatus", "ConfigStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigCondition) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&ConfigCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Config{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Config", "Config", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ConfigList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ConfigSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigSpec{`, + `ManagementState:` + fmt.Sprintf("%v", this.ManagementState) + `,`, + `SamplesRegistry:` + fmt.Sprintf("%v", this.SamplesRegistry) + `,`, + `Architectures:` + fmt.Sprintf("%v", this.Architectures) + `,`, + `SkippedImagestreams:` + fmt.Sprintf("%v", this.SkippedImagestreams) + `,`, + `SkippedTemplates:` + fmt.Sprintf("%v", this.SkippedTemplates) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]ConfigCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ConfigCondition", "ConfigCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ConfigStatus{`, + `ManagementState:` + fmt.Sprintf("%v", this.ManagementState) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `SamplesRegistry:` + fmt.Sprintf("%v", this.SamplesRegistry) + `,`, + `Architectures:` + fmt.Sprintf("%v", this.Architectures) + `,`, + `SkippedImagestreams:` + fmt.Sprintf("%v", this.SkippedImagestreams) + `,`, + `SkippedTemplates:` + fmt.Sprintf("%v", this.SkippedTemplates) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Config) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Config: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Config: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = 
ConfigConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + 
default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Config{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ManagementState", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ManagementState = github_com_openshift_api_operator_v1.ManagementState(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SamplesRegistry", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SamplesRegistry = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Architectures", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Architectures = append(m.Architectures, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SkippedImagestreams", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SkippedImagestreams = append(m.SkippedImagestreams, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SkippedTemplates", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SkippedTemplates = append(m.SkippedTemplates, string(dAtA[iNdEx:postIndex])) + 
iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ManagementState", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ManagementState = github_com_openshift_api_operator_v1.ManagementState(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, ConfigCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SamplesRegistry", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SamplesRegistry = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Architectures", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } 
+ b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Architectures = append(m.Architectures, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SkippedImagestreams", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SkippedImagestreams = append(m.SkippedImagestreams, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SkippedTemplates", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SkippedTemplates = append(m.SkippedTemplates, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, 
io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/openshift/api/samples/v1/generated.proto b/vendor/github.com/openshift/api/samples/v1/generated.proto new file mode 100644 index 000000000..9892cc8a4 --- /dev/null +++ b/vendor/github.com/openshift/api/samples/v1/generated.proto @@ -0,0 +1,156 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package github.com.openshift.api.samples.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/openshift/api/samples/v1"; + +// Config contains the configuration and detailed condition status for the Samples Operator. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message Config { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // +kubebuilder:validation:Required + // +required + optional ConfigSpec spec = 2; + + // +optional + optional ConfigStatus status = 3; +} + +// ConfigCondition captures various conditions of the Config +// as entries are processed. +message ConfigCondition { + // type of condition. + optional string type = 1; + + // status of the condition, one of True, False, Unknown. + optional string status = 2; + + // lastUpdateTime is the last time this condition was updated. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 3; + + // lastTransitionTime is the last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + + // reason is what caused the condition's last transition. + optional string reason = 5; + + // message is a human readable message indicating details about the transition. + optional string message = 6; +} + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message ConfigList { + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated Config items = 2; +} + +// ConfigSpec contains the desired configuration and state for the Samples Operator, controlling +// various behavior around the imagestreams and templates it creates/updates in the +// openshift namespace. +message ConfigSpec { + // managementState is top level on/off type of switch for all operators. + // When "Managed", this operator processes config and manipulates the samples accordingly. + // When "Unmanaged", this operator ignores any updates to the resources it watches. + // When "Removed", it reacts the same way as it does if the Config object + // is deleted, meaning any ImageStreams or Templates it manages (i.e. it honors the skipped + // lists) and the registry secret are deleted, along with the ConfigMap in the operator's + // namespace that represents the last config used to manipulate the samples. + optional string managementState = 1; + + // samplesRegistry allows for the specification of which registry is accessed + // by the ImageStreams for their image content. Defaults on the content in https://github.com/openshift/library + // that are pulled into this github repository, but based on our pulling only ocp content it typically + // defaults to registry.redhat.io. + optional string samplesRegistry = 2; + + // architectures determine which hardware architecture(s) to install, where x86_64, ppc64le, and s390x are the only + // supported choices currently. + repeated string architectures = 4; + + // skippedImagestreams specifies names of image streams that should NOT be + // created/updated. Admins can use this to allow them to delete content + // they don’t want. They will still have to manually delete the + // content but the operator will not recreate(or update) anything + // listed here. + repeated string skippedImagestreams = 5; + + // skippedTemplates specifies names of templates that should NOT be + // created/updated. Admins can use this to allow them to delete content + // they don’t want. They will still have to manually delete the + // content but the operator will not recreate(or update) anything + // listed here. + repeated string skippedTemplates = 6; +} + +// ConfigStatus contains the actual configuration in effect, as well as various details +// that describe the state of the Samples Operator. +message ConfigStatus { + // managementState reflects the current operational status of the on/off switch for + // the operator. This operator compares the ManagementState as part of determining that we are turning + // the operator back on (i.e. "Managed") when it was previously "Unmanaged". + // +patchMergeKey=type + // +patchStrategy=merge + optional string managementState = 1; + + // conditions represents the available maintenance status of the sample + // imagestreams and templates. + // +patchMergeKey=type + // +patchStrategy=merge + repeated ConfigCondition conditions = 2; + + // samplesRegistry allows for the specification of which registry is accessed + // by the ImageStreams for their image content. Defaults on the content in https://github.com/openshift/library + // that are pulled into this github repository, but based on our pulling only ocp content it typically + // defaults to registry.redhat.io.
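// Illustrative sketch, not part of the vendored file: the field numbers above are what
// the generated Go marshal code earlier in this diff turns into its single-byte tags
// (0xa, 0x12, 0x22, 0x2a, 0x32), via key = fieldNumber<<3 | wireType, with wire type 2
// for all of these length-delimited fields. protoTag and sov below are hypothetical
// helper names, self-contained and runnable:

package main

import "fmt"

// protoTag computes the protobuf key for a field: fieldNumber<<3 | wireType.
func protoTag(fieldNumber, wireType uint64) uint64 {
	return fieldNumber<<3 | wireType
}

// sov mirrors the generated sovGenerated helper: the number of base-128
// varint bytes needed to encode x (7 payload bits per byte).
func sov(x uint64) int {
	n := 1
	for x >= 1<<7 {
		x >>= 7
		n++
	}
	return n
}

func main() {
	// managementState = 1, samplesRegistry = 2, architectures = 4, ...
	for _, f := range []uint64{1, 2, 4, 5, 6} {
		fmt.Printf("field %d -> tag %#x\n", f, protoTag(f, 2)) // 0xa, 0x12, 0x22, 0x2a, 0x32
	}
	fmt.Println(sov(300)) // 2: varints grow by one byte per 7 bits of payload
}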
+ // +patchMergeKey=type + // +patchStrategy=merge + optional string samplesRegistry = 3; + + // architectures determine which hardware architecture(s) to install, where x86_64 and ppc64le are the + // supported choices. + // +patchMergeKey=type + // +patchStrategy=merge + repeated string architectures = 5; + + // skippedImagestreams specifies names of image streams that should NOT be + // created/updated. Admins can use this to allow them to delete content + // they don’t want. They will still have to manually delete the + // content but the operator will not recreate(or update) anything + // listed here. + // +patchMergeKey=type + // +patchStrategy=merge + repeated string skippedImagestreams = 6; + + // skippedTemplates specifies names of templates that should NOT be + // created/updated. Admins can use this to allow them to delete content + // they don’t want. They will still have to manually delete the + // content but the operator will not recreate(or update) anything + // listed here. + // +patchMergeKey=type + // +patchStrategy=merge + repeated string skippedTemplates = 7; + + // version is the value of the operator's payload based version indicator when it was last successfully processed + // +patchMergeKey=type + // +patchStrategy=merge + optional string version = 8; +} + diff --git a/vendor/github.com/openshift/api/samples/v1/register.go b/vendor/github.com/openshift/api/samples/v1/register.go new file mode 100644 index 000000000..3b0611e3f --- /dev/null +++ b/vendor/github.com/openshift/api/samples/v1/register.go @@ -0,0 +1,51 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + Version = "v1" + GroupName = "samples.operator.openshift.io" +) + +var ( + scheme = runtime.NewScheme() + GroupVersion = schema.GroupVersion{Group: GroupName, Version: Version} + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = SchemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = SchemeBuilder.AddToScheme +) + +func init() { + AddToScheme(scheme) +} + +// addKnownTypes adds the set of types defined in this package to the supplied scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Config{}, + &ConfigList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} diff --git a/vendor/github.com/openshift/api/samples/v1/stable.config.testsuite.yaml b/vendor/github.com/openshift/api/samples/v1/stable.config.testsuite.yaml new file mode 100644 index 000000000..dbb8e14a4 --- /dev/null +++ b/vendor/github.com/openshift/api/samples/v1/stable.config.testsuite.yaml @@ -0,0 +1,14 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] Config" +crd: 00_samplesconfig.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal Config + initial: | + apiVersion: samples.operator.openshift.io/v1 + kind: Config + spec: {} # No spec is required for a Config + expected: | + apiVersion: samples.operator.openshift.io/v1 + kind: Config + spec: {} diff --git a/vendor/github.com/openshift/api/samples/v1/types_config.go b/vendor/github.com/openshift/api/samples/v1/types_config.go new file mode 100644 index 000000000..ea8c7b513 --- /dev/null +++ b/vendor/github.com/openshift/api/samples/v1/types_config.go @@ -0,0 +1,240 @@ +package v1 + +import ( + operatorv1 "github.com/openshift/api/operator/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Config contains the configuration and detailed condition status for the Samples Operator. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Config struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + + // +kubebuilder:validation:Required + // +required + Spec ConfigSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + // +optional + Status ConfigStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ConfigSpec contains the desired configuration and state for the Samples Operator, controlling +// various behavior around the imagestreams and templates it creates/updates in the +// openshift namespace. +type ConfigSpec struct { + // managementState is top level on/off type of switch for all operators. + // When "Managed", this operator processes config and manipulates the samples accordingly. + // When "Unmanaged", this operator ignores any updates to the resources it watches. + // When "Removed", it reacts the same way as it does if the Config object + // is deleted, meaning any ImageStreams or Templates it manages (i.e.
it honors the skipped + // lists) and the registry secret are deleted, along with the ConfigMap in the operator's + // namespace that represents the last config used to manipulate the samples. + ManagementState operatorv1.ManagementState `json:"managementState,omitempty" protobuf:"bytes,1,opt,name=managementState"` + + // samplesRegistry allows for the specification of which registry is accessed + // by the ImageStreams for their image content. Defaults on the content in https://github.com/openshift/library + // that are pulled into this github repository, but based on our pulling only ocp content it typically + // defaults to registry.redhat.io. + SamplesRegistry string `json:"samplesRegistry,omitempty" protobuf:"bytes,2,opt,name=samplesRegistry"` + + // architectures determine which hardware architecture(s) to install, where x86_64, ppc64le, and s390x are the only + // supported choices currently. + Architectures []string `json:"architectures,omitempty" protobuf:"bytes,4,opt,name=architectures"` + + // skippedImagestreams specifies names of image streams that should NOT be + // created/updated. Admins can use this to allow them to delete content + // they don’t want. They will still have to manually delete the + // content but the operator will not recreate(or update) anything + // listed here. + SkippedImagestreams []string `json:"skippedImagestreams,omitempty" protobuf:"bytes,5,opt,name=skippedImagestreams"` + + // skippedTemplates specifies names of templates that should NOT be + // created/updated. Admins can use this to allow them to delete content + // they don’t want. They will still have to manually delete the + // content but the operator will not recreate(or update) anything + // listed here. + SkippedTemplates []string `json:"skippedTemplates,omitempty" protobuf:"bytes,6,opt,name=skippedTemplates"` +} + +// ConfigStatus contains the actual configuration in effect, as well as various details +// that describe the state of the Samples Operator. +type ConfigStatus struct { + // managementState reflects the current operational status of the on/off switch for + // the operator. This operator compares the ManagementState as part of determining that we are turning + // the operator back on (i.e. "Managed") when it was previously "Unmanaged". + // +patchMergeKey=type + // +patchStrategy=merge + ManagementState operatorv1.ManagementState `json:"managementState,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=managementState"` + // conditions represents the available maintenance status of the sample + // imagestreams and templates. + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []ConfigCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` + + // samplesRegistry allows for the specification of which registry is accessed + // by the ImageStreams for their image content. Defaults on the content in https://github.com/openshift/library + // that are pulled into this github repository, but based on our pulling only ocp content it typically + // defaults to registry.redhat.io. + // +patchMergeKey=type + // +patchStrategy=merge + SamplesRegistry string `json:"samplesRegistry,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,3,rep,name=samplesRegistry"` + + // architectures determine which hardware architecture(s) to install, where x86_64 and ppc64le are the + // supported choices.
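// Illustrative sketch, not part of the vendored file: how a caller could round-trip
// one of these objects through the generated protobuf code in this diff. The import
// paths are the real vendored packages; "example-template" is a made-up name.

package main

import (
	"fmt"

	operatorv1 "github.com/openshift/api/operator/v1"
	samplesv1 "github.com/openshift/api/samples/v1"
)

func main() {
	in := samplesv1.Config{
		Spec: samplesv1.ConfigSpec{
			ManagementState:  operatorv1.Managed,
			SkippedTemplates: []string{"example-template"},
		},
	}
	// Marshal delegates to the generated MarshalToSizedBuffer above,
	// which writes fields back-to-front into a pre-sized buffer.
	raw, err := in.Marshal()
	if err != nil {
		panic(err)
	}
	var out samplesv1.Config
	// Unmarshal walks the wire format tag-by-tag, as in the generated code above.
	if err := out.Unmarshal(raw); err != nil {
		panic(err)
	}
	fmt.Println(out.Spec.ManagementState, out.Spec.SkippedTemplates)
}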
+ // +patchMergeKey=type + // +patchStrategy=merge + Architectures []string `json:"architectures,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=architectures"` + + // skippedImagestreams specifies names of image streams that should NOT be + // created/updated. Admins can use this to allow them to delete content + // they don’t want. They will still have to manually delete the + // content but the operator will not recreate(or update) anything + // listed here. + // +patchMergeKey=type + // +patchStrategy=merge + SkippedImagestreams []string `json:"skippedImagestreams,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=skippedImagestreams"` + + // skippedTemplates specifies names of templates that should NOT be + // created/updated. Admins can use this to allow them to delete content + // they don’t want. They will still have to manually delete the + // content but the operator will not recreate(or update) anything + // listed here. + // +patchMergeKey=type + // +patchStrategy=merge + SkippedTemplates []string `json:"skippedTemplates,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,7,rep,name=skippedTemplates"` + + // version is the value of the operator's payload based version indicator when it was last successfully processed + // +patchMergeKey=type + // +patchStrategy=merge + Version string `json:"version,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,8,rep,name=version"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type ConfigList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + Items []Config `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +const ( + // SamplesRegistryCredentials is the name for a secret that contains a username+password/token + // for the registry, where, if the secret is present, it will be used for authentication. + // The corresponding secret is required to already be formatted as a + // dockerconfig secret so that it can just be copied + // to the openshift namespace + // for use during imagestream import. + SamplesRegistryCredentials = "samples-registry-credentials" + // ConfigName is the name/identifier of the static, singleton operator employed for the samples. + ConfigName = "cluster" + // X86Architecture is the value used to specify the x86_64 hardware architecture + // in the Architectures array field. + X86Architecture = "x86_64" + // AMDArchitecture is the golang value for x86 64-bit hardware architecture; for the purposes + // of this operator, it is equivalent to X86Architecture, which is kept for historical/migration + // purposes. + AMDArchitecture = "amd64" + // ARMArchitecture is the value used to specify the aarch64 hardware architecture + // in the Architectures array field. + ARMArchitecture = "arm64" + // PPCArchitecture is the value used to specify the ppc64le hardware architecture + // in the Architectures array field. + PPCArchitecture = "ppc64le" + // S390Architecture is the value used to specify the s390x hardware architecture + // in the Architectures array field.
+ S390Architecture = "s390x" + // ConfigFinalizer is the text added to the Config.Finalizer field + // to enable finalizer processing. + ConfigFinalizer = GroupName + "/finalizer" + // SamplesManagedLabel is the key for a label added to all the imagestreams and templates + // in the openshift namespace that the Config is managing. This label is adjusted + // when changes to the SkippedImagestreams and SkippedTemplates fields are made. + SamplesManagedLabel = GroupName + "/managed" + // SamplesVersionAnnotation is the key for an annotation set on the imagestreams, templates, + // and secret that this operator manages that signifies the version of the operator that + // last managed the particular resource. + SamplesVersionAnnotation = GroupName + "/version" + // SamplesRecreateCredentialAnnotation is the key for an annotation set on the secret used + // for authentication when configuration moves from Removed to Managed but the associated secret + // in the openshift namespace does not exist. This will initiate creation of the credential + // in the openshift namespace. + SamplesRecreateCredentialAnnotation = GroupName + "/recreate" + // OperatorNamespace is the namespace the operator runs in. + OperatorNamespace = "openshift-cluster-samples-operator" +) + +type ConfigConditionType string + +// the valid conditions of the Config + +const ( + // ImportCredentialsExist represents the state of any credentials specified by + // the SamplesRegistry field in the Spec. + ImportCredentialsExist ConfigConditionType = "ImportCredentialsExist" + // SamplesExist represents whether an incoming Config has been successfully + // processed or not at all, or whether the last Config to come in has been + // successfully processed. + SamplesExist ConfigConditionType = "SamplesExist" + // ConfigurationValid represents whether the latest Config to come in + // tried to make a supported configuration change. Currently, changes to the + // InstallType and Architectures list after initial processing are not allowed. + ConfigurationValid ConfigConditionType = "ConfigurationValid" + // ImageChangesInProgress represents the state between where the samples operator has + // started updating the imagestreams and when the spec and status generations for each + // tag match. The list of imagestreams that are still in progress will be stored + // in the Reason field of the condition. The Reason field being empty corresponds + // with this condition being marked true. + ImageChangesInProgress ConfigConditionType = "ImageChangesInProgress" + // RemovePending represents whether the Config Spec ManagementState + // has been set to Removed, but we have not completed the deletion of the + // samples, pull secret, etc. and set the Config Status ManagementState to Removed. + // Also note, while a samples creation/update cycle is still in progress, and ImageChangesInProgress + // is True, the operator will not initiate the deletions, as we + // do not want the create/updates and deletes of the samples to be occurring in parallel. + // So the actual Removed processing will be initiated only after ImageChangesInProgress is set + // to false. Once the deletions are done, and the Status ManagementState is Removed, this + // condition is set back to False. Lastly, when this condition is set to True, the + // ClusterOperator Progressing condition will be set to True.
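// Illustrative sketch, not part of the vendored file: controllers typically read the
// condition types defined in this block with a small lookup over Status.Conditions.
// findCondition is a hypothetical helper, assuming this package's types.
func findCondition(status ConfigStatus, t ConfigConditionType) *ConfigCondition {
	for i := range status.Conditions {
		if status.Conditions[i].Type == t {
			return &status.Conditions[i]
		}
	}
	return nil // condition not (yet) reported
}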
+
+type ConfigConditionType string
+
+// the valid conditions of the Config
+
+const (
+	// ImportCredentialsExist represents the state of any credentials specified by
+	// the SamplesRegistry field in the Spec.
+	ImportCredentialsExist ConfigConditionType = "ImportCredentialsExist"
+	// SamplesExist represents whether an incoming Config has been successfully
+	// processed at all, or whether the last Config to come in has been
+	// successfully processed.
+	SamplesExist ConfigConditionType = "SamplesExist"
+	// ConfigurationValid represents whether the latest Config to come in
+	// tried to make an unsupported configuration change. Currently, changes to the
+	// InstallType and Architectures list after initial processing are not allowed.
+	ConfigurationValid ConfigConditionType = "ConfigurationValid"
+	// ImageChangesInProgress represents the state between when the samples operator has
+	// started updating the imagestreams and when the spec and status generations for each
+	// tag match. The list of imagestreams that are still in progress will be stored
+	// in the Reason field of the condition. The Reason field being empty corresponds
+	// with this condition being marked true.
+	ImageChangesInProgress ConfigConditionType = "ImageChangesInProgress"
+	// RemovePending represents whether the Config Spec ManagementState
+	// has been set to Removed, but we have not completed the deletion of the
+	// samples, pull secret, etc. and set the Config Status ManagementState to Removed.
+	// Also note, while a samples creation/update cycle is still in progress, and ImageChangesInProgress
+	// is True, the operator will not initiate the deletions, as we
+	// do not want the create/updates and deletes of the samples to be occurring in parallel.
+	// So the actual Removed processing will be initiated only after ImageChangesInProgress is set
+	// to False. Once the deletions are done, and the Status ManagementState is Removed, this
+	// condition is set back to False. Lastly, when this condition is set to True, the
+	// ClusterOperator Progressing condition will be set to True.
+	RemovePending ConfigConditionType = "RemovePending"
+	// MigrationInProgress represents the special case where the operator is running off of
+	// a new version of its image, and the samples deployed are of a previous version. This condition
+	// facilitates the maintenance of this operator's ClusterOperator object.
+	MigrationInProgress ConfigConditionType = "MigrationInProgress"
+	// ImportImageErrorsExist registers any image import failures, separate from ImageChangesInProgress,
+	// so that we can a) indicate a problem to the ClusterOperator status, b) mark the current
+	// change cycle as complete in both ClusterOperator and Config; retry on import will
+	// occur by the next relist interval if it was an intermittent issue.
+	ImportImageErrorsExist ConfigConditionType = "ImportImageErrorsExist"
+)
+
+// ConfigCondition captures various conditions of the Config
+// as entries are processed.
+type ConfigCondition struct {
+	// type of condition.
+	Type ConfigConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ConfigConditionType"`
+	// status of the condition, one of True, False, Unknown.
+	Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/kubernetes/pkg/api/v1.ConditionStatus"`
+	// lastUpdateTime is the last time this condition was updated.
+	LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,3,opt,name=lastUpdateTime"`
+	// lastTransitionTime is the last time the condition transitioned from one status to another.
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
+	// reason is what caused the condition's last transition.
+	Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
+	// message is a human readable message indicating details about the transition.
+	Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
+}
diff --git a/vendor/github.com/openshift/api/samples/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/samples/v1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..82b42d522
--- /dev/null
+++ b/vendor/github.com/openshift/api/samples/v1/zz_generated.deepcopy.go
@@ -0,0 +1,158 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Config) DeepCopyInto(out *Config) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config.
+func (in *Config) DeepCopy() *Config {
+	if in == nil {
+		return nil
+	}
+	out := new(Config)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Config) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
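The point of these generated helpers is that reference fields are duplicated rather than aliased. A minimal editor's sketch, not part of the patch:

package main

import (
	"fmt"

	samplesv1 "github.com/openshift/api/samples/v1"
)

func main() {
	orig := &samplesv1.Config{}
	orig.Spec.SkippedTemplates = []string{"jenkins-ephemeral"}

	// DeepCopy duplicates the backing slice, so mutating the copy
	// leaves the original Config untouched.
	cp := orig.DeepCopy()
	cp.Spec.SkippedTemplates[0] = "changed"

	fmt.Println(orig.Spec.SkippedTemplates[0]) // still "jenkins-ephemeral"
}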
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigCondition) DeepCopyInto(out *ConfigCondition) {
+	*out = *in
+	in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
+	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigCondition.
+func (in *ConfigCondition) DeepCopy() *ConfigCondition {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigCondition)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigList) DeepCopyInto(out *ConfigList) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]Config, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigList.
+func (in *ConfigList) DeepCopy() *ConfigList {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigList)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ConfigList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigSpec) DeepCopyInto(out *ConfigSpec) {
+	*out = *in
+	if in.Architectures != nil {
+		in, out := &in.Architectures, &out.Architectures
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.SkippedImagestreams != nil {
+		in, out := &in.SkippedImagestreams, &out.SkippedImagestreams
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.SkippedTemplates != nil {
+		in, out := &in.SkippedTemplates, &out.SkippedTemplates
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigSpec.
+func (in *ConfigSpec) DeepCopy() *ConfigSpec {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigSpec)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ConfigStatus) DeepCopyInto(out *ConfigStatus) {
+	*out = *in
+	if in.Conditions != nil {
+		in, out := &in.Conditions, &out.Conditions
+		*out = make([]ConfigCondition, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.Architectures != nil {
+		in, out := &in.Architectures, &out.Architectures
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.SkippedImagestreams != nil {
+		in, out := &in.SkippedImagestreams, &out.SkippedImagestreams
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	if in.SkippedTemplates != nil {
+		in, out := &in.SkippedTemplates, &out.SkippedTemplates
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+	return
+}
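ConfigStatus.Conditions, copied above, is the slice that the ConfigConditionType values earlier in this patch key into. A hedged sketch of the usual lookup pattern; the helper name conditionTrue is the editor's, not part of the vendored API:

package main

import (
	samplesv1 "github.com/openshift/api/samples/v1"
	corev1 "k8s.io/api/core/v1"
)

// conditionTrue reports whether the condition of the given type is True.
// Illustrative only; the samples operator ships its own helpers.
func conditionTrue(status samplesv1.ConfigStatus, t samplesv1.ConfigConditionType) bool {
	for _, c := range status.Conditions {
		if c.Type == t {
			return c.Status == corev1.ConditionTrue
		}
	}
	return false
}

func main() {
	var st samplesv1.ConfigStatus
	_ = conditionTrue(st, samplesv1.SamplesExist)
}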
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigStatus.
+func (in *ConfigStatus) DeepCopy() *ConfigStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(ConfigStatus)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/github.com/openshift/api/samples/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/samples/v1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 000000000..3d3317f7d
--- /dev/null
+++ b/vendor/github.com/openshift/api/samples/v1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,74 @@
+package v1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_Config = map[string]string{
+	"":         "Config contains the configuration and detailed condition status for the Samples Operator.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (Config) SwaggerDoc() map[string]string {
+	return map_Config
+}
+
+var map_ConfigCondition = map[string]string{
+	"":                   "ConfigCondition captures various conditions of the Config as entries are processed.",
+	"type":               "type of condition.",
+	"status":             "status of the condition, one of True, False, Unknown.",
+	"lastUpdateTime":     "lastUpdateTime is the last time this condition was updated.",
+	"lastTransitionTime": "lastTransitionTime is the last time the condition transitioned from one status to another.",
+	"reason":             "reason is what caused the condition's last transition.",
+	"message":            "message is a human readable message indicating details about the transition.",
+}
+
+func (ConfigCondition) SwaggerDoc() map[string]string {
+	return map_ConfigCondition
+}
+
+var map_ConfigList = map[string]string{
+	"":         "Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (ConfigList) SwaggerDoc() map[string]string {
+	return map_ConfigList
+}
+
+var map_ConfigSpec = map[string]string{
+	"":                    "ConfigSpec contains the desired configuration and state for the Samples Operator, controlling various behavior around the imagestreams and templates it creates/updates in the openshift namespace.",
+	"managementState":     "managementState is the top level on/off type of switch for all operators. When \"Managed\", this operator processes config and manipulates the samples accordingly. When \"Unmanaged\", this operator ignores any updates to the resources it watches. When \"Removed\", it reacts the same way as it does if the Config object is deleted, meaning any ImageStreams or Templates it manages (i.e. it honors the skipped lists) and the registry secret are deleted, along with the ConfigMap in the operator's namespace that represents the last config used to manipulate the samples.",
+	"samplesRegistry":     "samplesRegistry allows for the specification of which registry is accessed by the ImageStreams for their image content. It defaults to the content in https://github.com/openshift/library that is pulled into this github repository, but since only OCP content is pulled, it typically defaults to registry.redhat.io.",
+	"architectures":       "architectures determine which hardware architecture(s) to install, where x86_64, ppc64le, and s390x are the only supported choices currently.",
+	"skippedImagestreams": "skippedImagestreams specifies names of image streams that should NOT be created/updated. Admins can use this to delete content they don’t want. They will still have to manually delete the content but the operator will not recreate (or update) anything listed here.",
+	"skippedTemplates":    "skippedTemplates specifies names of templates that should NOT be created/updated. Admins can use this to delete content they don’t want. They will still have to manually delete the content but the operator will not recreate (or update) anything listed here.",
+}
+
+func (ConfigSpec) SwaggerDoc() map[string]string {
+	return map_ConfigSpec
+}
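The skipped lists documented here are opt-outs, not deletions: the operator simply stops reconciling the named objects. An editor's sketch of setting them (illustrative, the stream and template names are hypothetical):

package main

import (
	"fmt"

	samplesv1 "github.com/openshift/api/samples/v1"
)

func main() {
	var cfg samplesv1.Config
	// Tell the operator to leave these out of its create/update pass;
	// any existing copies must still be deleted manually.
	cfg.Spec.SkippedImagestreams = []string{"jenkins"}
	cfg.Spec.SkippedTemplates = []string{"jenkins-ephemeral", "jenkins-persistent"}
	fmt.Println(cfg.Spec.SkippedImagestreams, cfg.Spec.SkippedTemplates)
}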
+
+var map_ConfigStatus = map[string]string{
+	"":                    "ConfigStatus contains the actual configuration in effect, as well as various details that describe the state of the Samples Operator.",
+	"managementState":     "managementState reflects the current operational status of the on/off switch for the operator. This operator compares the ManagementState as part of determining that we are turning the operator back on (i.e. \"Managed\") when it was previously \"Unmanaged\".",
+	"conditions":          "conditions represents the available maintenance status of the sample imagestreams and templates.",
+	"samplesRegistry":     "samplesRegistry allows for the specification of which registry is accessed by the ImageStreams for their image content. It defaults to the content in https://github.com/openshift/library that is pulled into this github repository, but since only OCP content is pulled, it typically defaults to registry.redhat.io.",
+	"architectures":       "architectures determine which hardware architecture(s) to install, where x86_64 and ppc64le are the supported choices.",
+	"skippedImagestreams": "skippedImagestreams specifies names of image streams that should NOT be created/updated. Admins can use this to delete content they don’t want. They will still have to manually delete the content but the operator will not recreate (or update) anything listed here.",
+	"skippedTemplates":    "skippedTemplates specifies names of templates that should NOT be created/updated. Admins can use this to delete content they don’t want. They will still have to manually delete the content but the operator will not recreate (or update) anything listed here.",
+	"version":             "version is the value of the operator's payload-based version indicator when it was last successfully processed.",
+}
+
+func (ConfigStatus) SwaggerDoc() map[string]string {
+	return map_ConfigStatus
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/security/install.go b/vendor/github.com/openshift/api/security/install.go
new file mode 100644
index 000000000..c2b04c432
--- /dev/null
+++ b/vendor/github.com/openshift/api/security/install.go
@@ -0,0 +1,26 @@
+package security
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	securityv1 "github.com/openshift/api/security/v1"
+)
+
+const (
+	GroupName = "security.openshift.io"
+)
+
+var (
+	schemeBuilder = runtime.NewSchemeBuilder(securityv1.Install)
+	// Install is a function which adds every version of this group to a scheme
+	Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+	return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+	return schema.GroupKind{Group: GroupName, Kind: kind}
+}
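Install above is the standard scheme-builder hook. A minimal sketch of registering the security group's types before building a client (editor's illustration, not part of the patch):

package main

import (
	"k8s.io/apimachinery/pkg/runtime"

	"github.com/openshift/api/security"
)

func main() {
	// Add every served version of security.openshift.io to a scheme,
	// e.g. prior to constructing a typed or controller-runtime client.
	scheme := runtime.NewScheme()
	if err := security.Install(scheme); err != nil {
		panic(err)
	}
}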
diff --git a/vendor/github.com/openshift/api/security/v1/0000_03_security-openshift_01_scc.crd.yaml b/vendor/github.com/openshift/api/security/v1/0000_03_security-openshift_01_scc.crd.yaml
new file mode 100644
index 000000000..f08d16578
--- /dev/null
+++ b/vendor/github.com/openshift/api/security/v1/0000_03_security-openshift_01_scc.crd.yaml
@@ -0,0 +1,279 @@
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    api-approved.openshift.io: https://github.com/openshift/api/pull/470
+    include.release.openshift.io/ibm-cloud-managed: "true"
+    include.release.openshift.io/self-managed-high-availability: "true"
+    include.release.openshift.io/single-node-developer: "true"
+  name: securitycontextconstraints.security.openshift.io
+spec:
+  group: security.openshift.io
+  names:
+    kind: SecurityContextConstraints
+    listKind: SecurityContextConstraintsList
+    plural: securitycontextconstraints
+    singular: securitycontextconstraints
+  scope: Cluster
+  versions:
+  - additionalPrinterColumns:
+    - description: Determines if a container can request to be run as privileged
+      jsonPath: .allowPrivilegedContainer
+      name: Priv
+      type: string
+    - description: A list of capabilities that can be requested to add to the container
+      jsonPath: .allowedCapabilities
+      name: Caps
+      type: string
+    - description: Strategy that will dictate what labels will be set in the SecurityContext
+      jsonPath: .seLinuxContext.type
+      name: SELinux
+      type: string
+    - description: Strategy that will dictate what RunAsUser is used in the SecurityContext
+      jsonPath: .runAsUser.type
+      name: RunAsUser
+      type: string
+    - description: Strategy that will dictate what fs group is used by the SecurityContext
+      jsonPath: .fsGroup.type
+      name: FSGroup
+      type: string
+    - description: Strategy that will dictate what supplemental groups are used by the SecurityContext
+      jsonPath: .supplementalGroups.type
+      name: SupGroup
+      type: string
+    - description: Sort order of SCCs
+      jsonPath: .priority
+      name: Priority
+      type: string
+    - description: Force containers to run with a read only root file system
+      jsonPath: .readOnlyRootFilesystem
+      name: ReadOnlyRootFS
+      type: string
+    - description: White list of allowed volume plugins
+      jsonPath: .volumes
+      name: Volumes
+      type: string
+    name: v1
+    schema:
+      openAPIV3Schema:
+        description: "SecurityContextConstraints governs the ability to make requests that affect the SecurityContext that will be applied to a container. For historical reasons SCC was exposed under the core Kubernetes API group. That exposure is deprecated and will be removed in a future release - users should instead use the security.openshift.io group to manage SecurityContextConstraints. \n Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer)."
+        type: object
+        required:
+        - allowHostDirVolumePlugin
+        - allowHostIPC
+        - allowHostNetwork
+        - allowHostPID
+        - allowHostPorts
+        - allowPrivilegedContainer
+        - allowedCapabilities
+        - defaultAddCapabilities
+        - priority
+        - readOnlyRootFilesystem
+        - requiredDropCapabilities
+        - volumes
+        properties:
+          allowHostDirVolumePlugin:
+            description: AllowHostDirVolumePlugin determines if the policy allows containers to use the HostDir volume plugin
+            type: boolean
+          allowHostIPC:
+            description: AllowHostIPC determines if the policy allows host ipc in the containers.
+            type: boolean
+          allowHostNetwork:
+            description: AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec.
+            type: boolean
+          allowHostPID:
+            description: AllowHostPID determines if the policy allows host pid in the containers.
+            type: boolean
+          allowHostPorts:
+            description: AllowHostPorts determines if the policy allows host ports in the containers.
+            type: boolean
+          allowPrivilegeEscalation:
+            description: AllowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.
+            type: boolean
+            nullable: true
+          allowPrivilegedContainer:
+            description: AllowPrivilegedContainer determines if a container can request to be run as privileged.
+            type: boolean
+          allowedCapabilities:
+            description: AllowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. To allow all capabilities you may use '*'.
+            type: array
+            items:
+              description: Capability represent POSIX capabilities type
+              type: string
+            nullable: true
+          allowedFlexVolumes:
+            description: AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the "Volumes" field.
+            type: array
+            items:
+              description: AllowedFlexVolume represents a single Flexvolume that is allowed to be used.
+              type: object
+              required:
+              - driver
+              properties:
+                driver:
+                  description: Driver is the name of the Flexvolume driver.
+                  type: string
+            nullable: true
+          allowedUnsafeSysctls:
+            description: "AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. \n Examples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc."
+            type: array
+            items:
+              type: string
+            nullable: true
+          apiVersion:
+            description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+            type: string
+          defaultAddCapabilities:
+            description: DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both DefaultAddCapabilities and RequiredDropCapabilities.
+            type: array
+            items:
+              description: Capability represent POSIX capabilities type
+              type: string
+            nullable: true
+          defaultAllowPrivilegeEscalation:
+            description: DefaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.
+            type: boolean
+            nullable: true
+          forbiddenSysctls:
+            description: "ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. \n Examples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc."
+            type: array
+            items:
+              type: string
+            nullable: true
+          fsGroup:
+            description: FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.
+            type: object
+            properties:
+              ranges:
+                description: Ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end.
+                type: array
+                items:
+                  description: 'IDRange provides a min/max of an allowed range of IDs. TODO: this could be reused for UIDs.'
+                  type: object
+                  properties:
+                    max:
+                      description: Max is the end of the range, inclusive.
+                      type: integer
+                      format: int64
+                    min:
+                      description: Min is the start of the range, inclusive.
+                      type: integer
+                      format: int64
+              type:
+                description: Type is the strategy that will dictate what FSGroup is used in the SecurityContext.
+                type: string
+            nullable: true
+          groups:
+            description: The groups that have permission to use this security context constraints
+            type: array
+            items:
+              type: string
+            nullable: true
+          kind:
+            description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+            type: string
+          metadata:
+            type: object
+          priority:
+            description: Priority influences the sort order of SCCs when evaluating which SCCs to try first for a given pod request based on access in the Users and Groups fields. The higher the int, the higher priority. An unset value is considered a 0 priority. If scores for multiple SCCs are equal they will be sorted from most restrictive to least restrictive. If both priorities and restrictions are equal the SCCs will be sorted by name.
+            type: integer
+            format: int32
+            nullable: true
+          readOnlyRootFilesystem:
+            description: ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the SCC should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.
+            type: boolean
+          requiredDropCapabilities:
+            description: RequiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.
+            type: array
+            items:
+              description: Capability represent POSIX capabilities type
+              type: string
+            nullable: true
+          runAsUser:
+            description: RunAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext.
+            type: object
+            properties:
+              type:
+                description: Type is the strategy that will dictate what RunAsUser is used in the SecurityContext.
+                type: string
+              uid:
+                description: UID is the user id that containers must run as. Required for the MustRunAs strategy if not using namespace/service account allocated uids.
+                type: integer
+                format: int64
+              uidRangeMax:
+                description: UIDRangeMax defines the max value for a strategy that allocates by range.
+                type: integer
+                format: int64
+              uidRangeMin:
+                description: UIDRangeMin defines the min value for a strategy that allocates by range.
+                type: integer
+                format: int64
+            nullable: true
+          seLinuxContext:
+            description: SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext.
+            type: object
+            properties:
+              seLinuxOptions:
+                description: seLinuxOptions required to run as; required for MustRunAs
+                type: object
+                properties:
+                  level:
+                    description: Level is SELinux level label that applies to the container.
+                    type: string
+                  role:
+                    description: Role is a SELinux role label that applies to the container.
+                    type: string
+                  type:
+                    description: Type is a SELinux type label that applies to the container.
+                    type: string
+                  user:
+                    description: User is a SELinux user label that applies to the container.
+                    type: string
+              type:
+                description: Type is the strategy that will dictate what SELinux context is used in the SecurityContext.
+                type: string
+            nullable: true
+          seccompProfiles:
+            description: "SeccompProfiles lists the allowed profiles that may be set for the pod or container's seccomp annotations. An unset (nil) or empty value means that no profiles may be specified by the pod or container. The wildcard '*' may be used to allow all profiles. When used to generate a value for a pod the first non-wildcard profile will be used as the default."
+            type: array
+            items:
+              type: string
+            nullable: true
+          supplementalGroups:
+            description: SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.
+            type: object
+            properties:
+              ranges:
+                description: Ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end.
+                type: array
+                items:
+                  description: 'IDRange provides a min/max of an allowed range of IDs. TODO: this could be reused for UIDs.'
+                  type: object
+                  properties:
+                    max:
+                      description: Max is the end of the range, inclusive.
+                      type: integer
+                      format: int64
+                    min:
+                      description: Min is the start of the range, inclusive.
+                      type: integer
+                      format: int64
+              type:
+                description: Type is the strategy that will dictate what supplemental groups are used in the SecurityContext.
+                type: string
+            nullable: true
+          users:
+            description: The users who have permissions to use this security context constraints
+            type: array
+            items:
+              type: string
+            nullable: true
+          volumes:
+            description: Volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names of a VolumeSource (azureFile, configMap, emptyDir).
To allow all volumes you may use "*". To allow no volumes, set to ["none"]. + type: array + items: + description: FS Type gives strong typing to different file systems that are used by volumes. + type: string + nullable: true + served: true + storage: true diff --git a/vendor/github.com/openshift/api/security/v1/Makefile b/vendor/github.com/openshift/api/security/v1/Makefile new file mode 100644 index 000000000..096e6fa2c --- /dev/null +++ b/vendor/github.com/openshift/api/security/v1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="security.openshift.io/v1" diff --git a/vendor/github.com/openshift/api/security/v1/consts.go b/vendor/github.com/openshift/api/security/v1/consts.go new file mode 100644 index 000000000..3b686c31d --- /dev/null +++ b/vendor/github.com/openshift/api/security/v1/consts.go @@ -0,0 +1,13 @@ +package v1 + +const ( + UIDRangeAnnotation = "openshift.io/sa.scc.uid-range" + // SupplementalGroupsAnnotation contains a comma delimited list of allocated supplemental groups + // for the namespace. Groups are in the form of a Block which supports {start}/{length} or {start}-{end} + SupplementalGroupsAnnotation = "openshift.io/sa.scc.supplemental-groups" + MCSAnnotation = "openshift.io/sa.scc.mcs" + ValidatedSCCAnnotation = "openshift.io/scc" + // This annotation pins required SCCs for core OpenShift workloads to prevent preemption of custom SCCs. + // It is being used in the SCC admission plugin. + RequiredSCCAnnotation = "openshift.io/required-scc" +) diff --git a/vendor/github.com/openshift/api/security/v1/doc.go b/vendor/github.com/openshift/api/security/v1/doc.go new file mode 100644 index 000000000..44fe37eb2 --- /dev/null +++ b/vendor/github.com/openshift/api/security/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/security/apis/security +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=security.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/security/v1/generated.pb.go b/vendor/github.com/openshift/api/security/v1/generated.pb.go new file mode 100644 index 000000000..d57b162c4 --- /dev/null +++ b/vendor/github.com/openshift/api/security/v1/generated.pb.go @@ -0,0 +1,5283 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/security/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } +func (*AllowedFlexVolume) ProtoMessage() {} +func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{0} +} +func (m *AllowedFlexVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedFlexVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedFlexVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedFlexVolume.Merge(m, src) +} +func (m *AllowedFlexVolume) XXX_Size() int { + return m.Size() +} +func (m *AllowedFlexVolume) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedFlexVolume.DiscardUnknown(m) +} + +var xxx_messageInfo_AllowedFlexVolume proto.InternalMessageInfo + +func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } +func (*FSGroupStrategyOptions) ProtoMessage() {} +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{1} +} +func (m *FSGroupStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FSGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FSGroupStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FSGroupStrategyOptions.Merge(m, src) +} +func (m *FSGroupStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *FSGroupStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FSGroupStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FSGroupStrategyOptions proto.InternalMessageInfo + +func (m *IDRange) Reset() { *m = IDRange{} } +func (*IDRange) ProtoMessage() {} +func (*IDRange) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{2} +} +func (m *IDRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IDRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IDRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_IDRange.Merge(m, src) +} +func (m *IDRange) XXX_Size() int { + return m.Size() +} +func (m *IDRange) XXX_DiscardUnknown() { + xxx_messageInfo_IDRange.DiscardUnknown(m) +} + +var xxx_messageInfo_IDRange proto.InternalMessageInfo + +func (m *PodSecurityPolicyReview) Reset() { *m = PodSecurityPolicyReview{} } +func (*PodSecurityPolicyReview) ProtoMessage() {} +func (*PodSecurityPolicyReview) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{3} +} +func (m *PodSecurityPolicyReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicyReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicyReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicyReview.Merge(m, src) +} +func (m *PodSecurityPolicyReview) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicyReview) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicyReview.DiscardUnknown(m) +} + +var 
xxx_messageInfo_PodSecurityPolicyReview proto.InternalMessageInfo + +func (m *PodSecurityPolicyReviewSpec) Reset() { *m = PodSecurityPolicyReviewSpec{} } +func (*PodSecurityPolicyReviewSpec) ProtoMessage() {} +func (*PodSecurityPolicyReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{4} +} +func (m *PodSecurityPolicyReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicyReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicyReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicyReviewSpec.Merge(m, src) +} +func (m *PodSecurityPolicyReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicyReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicyReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicyReviewSpec proto.InternalMessageInfo + +func (m *PodSecurityPolicyReviewStatus) Reset() { *m = PodSecurityPolicyReviewStatus{} } +func (*PodSecurityPolicyReviewStatus) ProtoMessage() {} +func (*PodSecurityPolicyReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{5} +} +func (m *PodSecurityPolicyReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicyReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicyReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicyReviewStatus.Merge(m, src) +} +func (m *PodSecurityPolicyReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicyReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicyReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicyReviewStatus proto.InternalMessageInfo + +func (m *PodSecurityPolicySelfSubjectReview) Reset() { *m = PodSecurityPolicySelfSubjectReview{} } +func (*PodSecurityPolicySelfSubjectReview) ProtoMessage() {} +func (*PodSecurityPolicySelfSubjectReview) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{6} +} +func (m *PodSecurityPolicySelfSubjectReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicySelfSubjectReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicySelfSubjectReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicySelfSubjectReview.Merge(m, src) +} +func (m *PodSecurityPolicySelfSubjectReview) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicySelfSubjectReview) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicySelfSubjectReview.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicySelfSubjectReview proto.InternalMessageInfo + +func (m *PodSecurityPolicySelfSubjectReviewSpec) Reset() { + *m = PodSecurityPolicySelfSubjectReviewSpec{} +} +func (*PodSecurityPolicySelfSubjectReviewSpec) ProtoMessage() {} +func (*PodSecurityPolicySelfSubjectReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{7} +} +func (m *PodSecurityPolicySelfSubjectReviewSpec) XXX_Unmarshal(b []byte) error 
{ + return m.Unmarshal(b) +} +func (m *PodSecurityPolicySelfSubjectReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicySelfSubjectReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicySelfSubjectReviewSpec.Merge(m, src) +} +func (m *PodSecurityPolicySelfSubjectReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicySelfSubjectReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicySelfSubjectReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicySelfSubjectReviewSpec proto.InternalMessageInfo + +func (m *PodSecurityPolicySubjectReview) Reset() { *m = PodSecurityPolicySubjectReview{} } +func (*PodSecurityPolicySubjectReview) ProtoMessage() {} +func (*PodSecurityPolicySubjectReview) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{8} +} +func (m *PodSecurityPolicySubjectReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicySubjectReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicySubjectReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicySubjectReview.Merge(m, src) +} +func (m *PodSecurityPolicySubjectReview) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicySubjectReview) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicySubjectReview.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicySubjectReview proto.InternalMessageInfo + +func (m *PodSecurityPolicySubjectReviewSpec) Reset() { *m = PodSecurityPolicySubjectReviewSpec{} } +func (*PodSecurityPolicySubjectReviewSpec) ProtoMessage() {} +func (*PodSecurityPolicySubjectReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{9} +} +func (m *PodSecurityPolicySubjectReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicySubjectReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicySubjectReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicySubjectReviewSpec.Merge(m, src) +} +func (m *PodSecurityPolicySubjectReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicySubjectReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicySubjectReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicySubjectReviewSpec proto.InternalMessageInfo + +func (m *PodSecurityPolicySubjectReviewStatus) Reset() { *m = PodSecurityPolicySubjectReviewStatus{} } +func (*PodSecurityPolicySubjectReviewStatus) ProtoMessage() {} +func (*PodSecurityPolicySubjectReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{10} +} +func (m *PodSecurityPolicySubjectReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicySubjectReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicySubjectReviewStatus) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_PodSecurityPolicySubjectReviewStatus.Merge(m, src) +} +func (m *PodSecurityPolicySubjectReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicySubjectReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicySubjectReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicySubjectReviewStatus proto.InternalMessageInfo + +func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } +func (*RangeAllocation) ProtoMessage() {} +func (*RangeAllocation) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{11} +} +func (m *RangeAllocation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RangeAllocation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RangeAllocation) XXX_Merge(src proto.Message) { + xxx_messageInfo_RangeAllocation.Merge(m, src) +} +func (m *RangeAllocation) XXX_Size() int { + return m.Size() +} +func (m *RangeAllocation) XXX_DiscardUnknown() { + xxx_messageInfo_RangeAllocation.DiscardUnknown(m) +} + +var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo + +func (m *RangeAllocationList) Reset() { *m = RangeAllocationList{} } +func (*RangeAllocationList) ProtoMessage() {} +func (*RangeAllocationList) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{12} +} +func (m *RangeAllocationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RangeAllocationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RangeAllocationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RangeAllocationList.Merge(m, src) +} +func (m *RangeAllocationList) XXX_Size() int { + return m.Size() +} +func (m *RangeAllocationList) XXX_DiscardUnknown() { + xxx_messageInfo_RangeAllocationList.DiscardUnknown(m) +} + +var xxx_messageInfo_RangeAllocationList proto.InternalMessageInfo + +func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } +func (*RunAsUserStrategyOptions) ProtoMessage() {} +func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{13} +} +func (m *RunAsUserStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RunAsUserStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunAsUserStrategyOptions.Merge(m, src) +} +func (m *RunAsUserStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RunAsUserStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RunAsUserStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RunAsUserStrategyOptions proto.InternalMessageInfo + +func (m *SELinuxContextStrategyOptions) Reset() { *m = SELinuxContextStrategyOptions{} } +func (*SELinuxContextStrategyOptions) ProtoMessage() {} +func (*SELinuxContextStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{14} +} +func (m *SELinuxContextStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SELinuxContextStrategyOptions) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SELinuxContextStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SELinuxContextStrategyOptions.Merge(m, src) +} +func (m *SELinuxContextStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *SELinuxContextStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SELinuxContextStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_SELinuxContextStrategyOptions proto.InternalMessageInfo + +func (m *SecurityContextConstraints) Reset() { *m = SecurityContextConstraints{} } +func (*SecurityContextConstraints) ProtoMessage() {} +func (*SecurityContextConstraints) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{15} +} +func (m *SecurityContextConstraints) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecurityContextConstraints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SecurityContextConstraints) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecurityContextConstraints.Merge(m, src) +} +func (m *SecurityContextConstraints) XXX_Size() int { + return m.Size() +} +func (m *SecurityContextConstraints) XXX_DiscardUnknown() { + xxx_messageInfo_SecurityContextConstraints.DiscardUnknown(m) +} + +var xxx_messageInfo_SecurityContextConstraints proto.InternalMessageInfo + +func (m *SecurityContextConstraintsList) Reset() { *m = SecurityContextConstraintsList{} } +func (*SecurityContextConstraintsList) ProtoMessage() {} +func (*SecurityContextConstraintsList) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{16} +} +func (m *SecurityContextConstraintsList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecurityContextConstraintsList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SecurityContextConstraintsList) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecurityContextConstraintsList.Merge(m, src) +} +func (m *SecurityContextConstraintsList) XXX_Size() int { + return m.Size() +} +func (m *SecurityContextConstraintsList) XXX_DiscardUnknown() { + xxx_messageInfo_SecurityContextConstraintsList.DiscardUnknown(m) +} + +var xxx_messageInfo_SecurityContextConstraintsList proto.InternalMessageInfo + +func (m *ServiceAccountPodSecurityPolicyReviewStatus) Reset() { + *m = ServiceAccountPodSecurityPolicyReviewStatus{} +} +func (*ServiceAccountPodSecurityPolicyReviewStatus) ProtoMessage() {} +func (*ServiceAccountPodSecurityPolicyReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{17} +} +func (m *ServiceAccountPodSecurityPolicyReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccountPodSecurityPolicyReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceAccountPodSecurityPolicyReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccountPodSecurityPolicyReviewStatus.Merge(m, src) +} +func (m *ServiceAccountPodSecurityPolicyReviewStatus) XXX_Size() int { + return m.Size() +} +func 
(m *ServiceAccountPodSecurityPolicyReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccountPodSecurityPolicyReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceAccountPodSecurityPolicyReviewStatus proto.InternalMessageInfo + +func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } +func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} +func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_af65d9655aa67551, []int{18} +} +func (m *SupplementalGroupsStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SupplementalGroupsStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SupplementalGroupsStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SupplementalGroupsStrategyOptions.Merge(m, src) +} +func (m *SupplementalGroupsStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *SupplementalGroupsStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SupplementalGroupsStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_SupplementalGroupsStrategyOptions proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AllowedFlexVolume)(nil), "github.com.openshift.api.security.v1.AllowedFlexVolume") + proto.RegisterType((*FSGroupStrategyOptions)(nil), "github.com.openshift.api.security.v1.FSGroupStrategyOptions") + proto.RegisterType((*IDRange)(nil), "github.com.openshift.api.security.v1.IDRange") + proto.RegisterType((*PodSecurityPolicyReview)(nil), "github.com.openshift.api.security.v1.PodSecurityPolicyReview") + proto.RegisterType((*PodSecurityPolicyReviewSpec)(nil), "github.com.openshift.api.security.v1.PodSecurityPolicyReviewSpec") + proto.RegisterType((*PodSecurityPolicyReviewStatus)(nil), "github.com.openshift.api.security.v1.PodSecurityPolicyReviewStatus") + proto.RegisterType((*PodSecurityPolicySelfSubjectReview)(nil), "github.com.openshift.api.security.v1.PodSecurityPolicySelfSubjectReview") + proto.RegisterType((*PodSecurityPolicySelfSubjectReviewSpec)(nil), "github.com.openshift.api.security.v1.PodSecurityPolicySelfSubjectReviewSpec") + proto.RegisterType((*PodSecurityPolicySubjectReview)(nil), "github.com.openshift.api.security.v1.PodSecurityPolicySubjectReview") + proto.RegisterType((*PodSecurityPolicySubjectReviewSpec)(nil), "github.com.openshift.api.security.v1.PodSecurityPolicySubjectReviewSpec") + proto.RegisterType((*PodSecurityPolicySubjectReviewStatus)(nil), "github.com.openshift.api.security.v1.PodSecurityPolicySubjectReviewStatus") + proto.RegisterType((*RangeAllocation)(nil), "github.com.openshift.api.security.v1.RangeAllocation") + proto.RegisterType((*RangeAllocationList)(nil), "github.com.openshift.api.security.v1.RangeAllocationList") + proto.RegisterType((*RunAsUserStrategyOptions)(nil), "github.com.openshift.api.security.v1.RunAsUserStrategyOptions") + proto.RegisterType((*SELinuxContextStrategyOptions)(nil), "github.com.openshift.api.security.v1.SELinuxContextStrategyOptions") + proto.RegisterType((*SecurityContextConstraints)(nil), "github.com.openshift.api.security.v1.SecurityContextConstraints") + proto.RegisterType((*SecurityContextConstraintsList)(nil), "github.com.openshift.api.security.v1.SecurityContextConstraintsList") + proto.RegisterType((*ServiceAccountPodSecurityPolicyReviewStatus)(nil), 
"github.com.openshift.api.security.v1.ServiceAccountPodSecurityPolicyReviewStatus") + proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "github.com.openshift.api.security.v1.SupplementalGroupsStrategyOptions") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/security/v1/generated.proto", fileDescriptor_af65d9655aa67551) +} + +var fileDescriptor_af65d9655aa67551 = []byte{ + // 1750 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xcd, 0x6f, 0x1c, 0x49, + 0x15, 0x77, 0x7b, 0xfc, 0x35, 0x65, 0xc7, 0x1f, 0x65, 0xc7, 0xe9, 0x35, 0xeb, 0x19, 0xd3, 0x36, + 0xab, 0x08, 0xd8, 0x19, 0x12, 0x2d, 0x6c, 0xd0, 0xb2, 0xd1, 0x4e, 0x7b, 0xd6, 0x59, 0xaf, 0x9c, + 0x64, 0xb6, 0x66, 0xbd, 0x42, 0xab, 0x15, 0xa2, 0xdc, 0x53, 0x33, 0xae, 0xb8, 0xbf, 0xe8, 0xaa, + 0x76, 0x3c, 0xe2, 0x12, 0x89, 0x0b, 0x47, 0x24, 0xae, 0x88, 0x33, 0xfc, 0x03, 0x5c, 0x10, 0x70, + 0x8d, 0x04, 0x12, 0x39, 0xa1, 0x9c, 0x46, 0x64, 0x10, 0x27, 0x8e, 0xdc, 0x72, 0x42, 0x55, 0x53, + 0xf3, 0xd1, 0x3d, 0xdd, 0xe3, 0x4e, 0x48, 0xa2, 0xbd, 0x4d, 0xbf, 0x8f, 0xdf, 0xef, 0xbd, 0xd7, + 0xaf, 0x5f, 0xbd, 0x1a, 0xf0, 0x5e, 0x8b, 0xf2, 0xd3, 0xf0, 0xa4, 0x64, 0x79, 0x4e, 0xd9, 0xf3, + 0x89, 0xcb, 0x4e, 0x69, 0x93, 0x97, 0xb1, 0x4f, 0xcb, 0x8c, 0x58, 0x61, 0x40, 0x79, 0xbb, 0x7c, + 0x7e, 0xa3, 0xdc, 0x22, 0x2e, 0x09, 0x30, 0x27, 0x8d, 0x92, 0x1f, 0x78, 0xdc, 0x83, 0x7b, 0x43, + 0xaf, 0xd2, 0xc0, 0xab, 0x84, 0x7d, 0x5a, 0xea, 0x7b, 0x95, 0xce, 0x6f, 0x6c, 0xbd, 0x3b, 0x82, + 0xdd, 0xf2, 0x5a, 0x5e, 0x59, 0x3a, 0x9f, 0x84, 0x4d, 0xf9, 0x24, 0x1f, 0xe4, 0xaf, 0x1e, 0xe8, + 0x96, 0x71, 0x76, 0x8b, 0x95, 0xa8, 0x27, 0xc9, 0x2d, 0x2f, 0x20, 0x09, 0xc4, 0x5b, 0xef, 0x0d, + 0x6d, 0x1c, 0x6c, 0x9d, 0x52, 0x97, 0x04, 0xed, 0xb2, 0x7f, 0xd6, 0x12, 0x02, 0x56, 0x76, 0x08, + 0xc7, 0x49, 0x5e, 0x3f, 0x48, 0xf3, 0x0a, 0x42, 0x97, 0x53, 0x87, 0x94, 0x99, 0x75, 0x4a, 0x1c, + 0x1c, 0xf7, 0x33, 0x3e, 0x00, 0x6b, 0x15, 0xdb, 0xf6, 0x1e, 0x92, 0xc6, 0x81, 0x4d, 0x2e, 0xbe, + 0xf0, 0xec, 0xd0, 0x21, 0xf0, 0x1d, 0x30, 0xd7, 0x08, 0xe8, 0x39, 0x09, 0x74, 0x6d, 0x47, 0xbb, + 0x9e, 0x37, 0x97, 0x1f, 0x77, 0x8a, 0x53, 0xdd, 0x4e, 0x71, 0xae, 0x2a, 0xa5, 0x48, 0x69, 0x8d, + 0xdf, 0x69, 0x60, 0xf3, 0xa0, 0x7e, 0x27, 0xf0, 0x42, 0xbf, 0xce, 0x05, 0x6a, 0xab, 0x7d, 0xdf, + 0xe7, 0xd4, 0x73, 0x19, 0x7c, 0x1f, 0xcc, 0xf0, 0xb6, 0x4f, 0x14, 0xc0, 0xae, 0x02, 0x98, 0xf9, + 0xbc, 0xed, 0x93, 0xe7, 0x9d, 0xe2, 0x7a, 0xcc, 0x4b, 0x88, 0x91, 0x74, 0x80, 0xc7, 0x60, 0x2e, + 0xc0, 0x6e, 0x8b, 0x30, 0x7d, 0x7a, 0x27, 0x77, 0x7d, 0xf1, 0xe6, 0xbb, 0xa5, 0x2c, 0x2f, 0xa2, + 0x74, 0x58, 0x45, 0xc2, 0x6b, 0x18, 0xaa, 0x7c, 0x64, 0x48, 0x81, 0x19, 0x77, 0xc0, 0xbc, 0x32, + 0x81, 0xdb, 0x20, 0xe7, 0x50, 0x57, 0x46, 0x96, 0x33, 0x17, 0x95, 0x7d, 0xee, 0x2e, 0x75, 0x91, + 0x90, 0x4b, 0x35, 0xbe, 0xd0, 0xa7, 0x63, 0x6a, 0x7c, 0x81, 0x84, 0xdc, 0xf8, 0x8f, 0x06, 0xae, + 0xd5, 0xbc, 0x46, 0x5d, 0x71, 0xd7, 0x3c, 0x9b, 0x5a, 0x6d, 0x44, 0xce, 0x29, 0x79, 0x08, 0x2d, + 0x30, 0xc3, 0x7c, 0x62, 0x49, 0xe8, 0xc5, 0x9b, 0x95, 0x6c, 0x91, 0xa7, 0x80, 0xd5, 0x7d, 0x62, + 0x99, 0x4b, 0xfd, 0xba, 0x89, 0x27, 0x24, 0xc1, 0xe1, 0x19, 0x98, 0x63, 0x1c, 0xf3, 0x90, 0xc9, + 0x10, 0x17, 0x6f, 0xee, 0xff, 0x7f, 0x34, 0x12, 0x6a, 0x58, 0xb6, 0xde, 0x33, 0x52, 0x14, 0xc6, + 0x1f, 0x35, 0xf0, 0x8d, 0x09, 0x01, 0xc2, 0xcf, 0xc0, 0x02, 0x27, 0x8e, 0x6f, 0x63, 0x4e, 0x54, + 0xd6, 0xbb, 0xa5, 0x5e, 0x27, 0xca, 0x00, 0x44, 0x8f, 0x2b, 0xf2, 0xcf, 0x95, 0x99, 0xcc, 0x6b, + 0x55, 0xd1, 0x2d, 0xf4, 0xa5, 0x68, 0x00, 0x03, 0x0f, 0xc1, 
0x3a, 0x23, 0xc1, 0x39, 0xb5, 0x48, + 0xc5, 0xb2, 0xbc, 0xd0, 0xe5, 0xf7, 0xb0, 0xa3, 0xba, 0x21, 0x6f, 0x5e, 0xeb, 0x76, 0x8a, 0xeb, + 0xf5, 0x71, 0x35, 0x4a, 0xf2, 0x31, 0xfe, 0xaa, 0x81, 0xed, 0x89, 0x79, 0xc3, 0xdf, 0x6b, 0x60, + 0x13, 0xf7, 0xfa, 0x3f, 0x8a, 0xca, 0x74, 0x4d, 0xb6, 0xdf, 0x67, 0xd9, 0xaa, 0x1b, 0x75, 0x9e, + 0x5c, 0xeb, 0x82, 0x4a, 0x7e, 0xb3, 0x92, 0x48, 0x8c, 0x52, 0x02, 0x32, 0x7e, 0x39, 0x0d, 0x8c, + 0x31, 0xe4, 0x3a, 0xb1, 0x9b, 0xf5, 0xf0, 0xe4, 0x01, 0xb1, 0xb8, 0x6a, 0x42, 0x37, 0xd2, 0x84, + 0x47, 0x2f, 0xd9, 0x1d, 0x63, 0xb8, 0xa9, 0xfd, 0x18, 0xc4, 0xfa, 0xf1, 0xd3, 0x97, 0x65, 0x8c, + 0xb0, 0x4d, 0x6e, 0xcb, 0x9f, 0x83, 0x77, 0xb2, 0x45, 0xfc, 0x1a, 0x1a, 0xd4, 0x78, 0x34, 0x0d, + 0x0a, 0x93, 0xa3, 0x87, 0x0f, 0x22, 0xef, 0xe0, 0x93, 0x57, 0x52, 0x91, 0xaf, 0x53, 0xfd, 0xff, + 0xa4, 0x25, 0xb5, 0xe2, 0x1b, 0x28, 0x3e, 0xdc, 0x01, 0x33, 0x21, 0x23, 0x81, 0xcc, 0x35, 0x3f, + 0xac, 0xc7, 0x31, 0x23, 0x01, 0x92, 0x1a, 0x68, 0x80, 0xb9, 0x96, 0x38, 0x5b, 0x98, 0x9e, 0x93, + 0x23, 0x03, 0x88, 0xf8, 0xe5, 0x69, 0xc3, 0x90, 0xd2, 0x18, 0xff, 0xd5, 0xc0, 0x5e, 0x96, 0x02, + 0xc0, 0x1a, 0xc8, 0xab, 0xaf, 0xd1, 0x6c, 0x4f, 0x4a, 0xe1, 0xbe, 0x72, 0x6d, 0x92, 0x80, 0xb8, + 0x16, 0x31, 0xaf, 0x74, 0x3b, 0xc5, 0x7c, 0xa5, 0xef, 0x89, 0x86, 0x20, 0xe2, 0x6c, 0x0d, 0x08, + 0x66, 0x9e, 0xab, 0x52, 0x18, 0x1e, 0x58, 0x52, 0x8a, 0x94, 0x36, 0x52, 0xbb, 0xdc, 0xab, 0x69, + 0xdc, 0x3f, 0x68, 0x60, 0x45, 0x1e, 0x81, 0x22, 0x30, 0x0b, 0x8b, 0x83, 0x1a, 0xfe, 0x14, 0x2c, + 0x88, 0x95, 0xa2, 0x81, 0x39, 0x56, 0xf9, 0x7d, 0x6f, 0x84, 0x66, 0xb0, 0x4a, 0x94, 0xfc, 0xb3, + 0x96, 0x10, 0xb0, 0x92, 0xb0, 0x1e, 0x66, 0x7c, 0x97, 0x70, 0x6c, 0x42, 0xc5, 0x09, 0x86, 0x32, + 0x34, 0x40, 0x85, 0xbb, 0x60, 0x56, 0x9e, 0xc1, 0x2a, 0xdf, 0x2b, 0xca, 0x78, 0x56, 0x46, 0x82, + 0x7a, 0x3a, 0xf8, 0x36, 0x98, 0x91, 0x21, 0x88, 0x4c, 0x97, 0xcc, 0x05, 0xf1, 0x4a, 0xab, 0x98, + 0x63, 0x24, 0xa5, 0xc6, 0xdf, 0x35, 0xb0, 0x1e, 0x0b, 0xfc, 0x88, 0x32, 0x0e, 0xbf, 0x1a, 0x0b, + 0xbe, 0x94, 0x2d, 0x78, 0xe1, 0x2d, 0x43, 0x1f, 0x94, 0xab, 0x2f, 0x19, 0x09, 0xfc, 0x4b, 0x30, + 0x4b, 0x39, 0x71, 0xfa, 0x8b, 0xc8, 0xf7, 0xb3, 0x7d, 0x57, 0xb1, 0x38, 0x87, 0xf9, 0x1e, 0x0a, + 0x2c, 0xd4, 0x83, 0x34, 0xfe, 0xa1, 0x01, 0x1d, 0x85, 0x6e, 0x85, 0x89, 0xc6, 0x8d, 0xef, 0x4e, + 0x3f, 0x8c, 0xec, 0x4e, 0xdf, 0x8a, 0xed, 0x4e, 0x57, 0xc7, 0xfc, 0x46, 0xb6, 0xa7, 0xb7, 0x40, + 0x2e, 0xa4, 0x0d, 0xb5, 0xbc, 0xcc, 0x8b, 0xc5, 0xe5, 0xf8, 0xb0, 0x8a, 0x84, 0x0c, 0xde, 0x00, + 0x8b, 0x21, 0x6d, 0xc8, 0xf0, 0xee, 0x52, 0x57, 0x56, 0x3a, 0x67, 0xae, 0x74, 0x3b, 0xc5, 0xc5, + 0x63, 0xb5, 0x19, 0x89, 0x15, 0x68, 0xd4, 0x26, 0xe2, 0x82, 0x2f, 0xf4, 0x99, 0x04, 0x17, 0x7c, + 0x81, 0x46, 0x6d, 0x8c, 0xbf, 0x68, 0x60, 0xbb, 0xfe, 0xf1, 0x11, 0x75, 0xc3, 0x8b, 0x7d, 0xcf, + 0xe5, 0xe4, 0x82, 0xc7, 0xb3, 0xbb, 0x1d, 0xc9, 0xee, 0xdb, 0xb1, 0xec, 0xb6, 0x92, 0x9d, 0x47, + 0x52, 0xfc, 0x09, 0x58, 0x66, 0x44, 0xda, 0x28, 0x44, 0x35, 0xf7, 0x8c, 0xa4, 0xcf, 0x43, 0xa1, + 0x29, 0x4b, 0x13, 0x76, 0x3b, 0xc5, 0xe5, 0xa8, 0x0c, 0xc5, 0xd0, 0x8c, 0xdf, 0xac, 0x81, 0xad, + 0xfe, 0x60, 0x50, 0x51, 0xec, 0x7b, 0x2e, 0xe3, 0x01, 0xa6, 0x2e, 0x67, 0x6f, 0xe0, 0x83, 0xb9, + 0x0e, 0x16, 0xfc, 0x80, 0x7a, 0x82, 0x5f, 0xa6, 0x36, 0x6b, 0x2e, 0x89, 0x0e, 0xad, 0x29, 0x19, + 0x1a, 0x68, 0xe1, 0x57, 0x40, 0x97, 0x83, 0xa5, 0x16, 0xd0, 0x73, 0x6a, 0x93, 0x16, 0x69, 0x88, + 0x80, 0xb1, 0x08, 0x40, 0xbe, 0xdf, 0x05, 0x73, 0x47, 0x31, 0xe9, 0x95, 0x14, 0x3b, 0x94, 0x8a, + 0x00, 0x19, 0xd8, 0x6c, 0x90, 0x26, 0x0e, 0x6d, 0x5e, 0x69, 0x34, 0xf6, 0xb1, 0x8f, 
0x4f, 0xa8, + 0x4d, 0x39, 0x25, 0x4c, 0x9f, 0x91, 0x83, 0xf5, 0x03, 0xb1, 0xc3, 0x54, 0x13, 0x2d, 0x9e, 0x77, + 0x8a, 0xdb, 0xe3, 0x57, 0x9d, 0xd2, 0xc0, 0xa4, 0x8d, 0x52, 0xa0, 0x61, 0x1b, 0xe8, 0x01, 0xf9, + 0x59, 0x48, 0x03, 0xd2, 0xa8, 0x06, 0x9e, 0x1f, 0xa1, 0x9d, 0x95, 0xb4, 0x1f, 0x8a, 0x74, 0x50, + 0x8a, 0xcd, 0xe5, 0xc4, 0xa9, 0xf0, 0xf0, 0x01, 0x58, 0x57, 0x63, 0x3a, 0xc2, 0x3a, 0x27, 0x59, + 0x6f, 0x89, 0xc5, 0xb3, 0x32, 0xae, 0xbe, 0x9c, 0x30, 0x09, 0x74, 0xf0, 0xe6, 0x3e, 0xf1, 0x18, + 0xaf, 0xd2, 0xa0, 0x77, 0xef, 0xaa, 0xd9, 0x61, 0x8b, 0xba, 0xfa, 0x7c, 0xc2, 0x9b, 0x4b, 0xb0, + 0x43, 0xa9, 0x08, 0xb0, 0x0c, 0xe6, 0xcf, 0xe5, 0x33, 0xd3, 0x17, 0x64, 0xf4, 0x57, 0xbb, 0x9d, + 0xe2, 0x7c, 0xcf, 0x44, 0x44, 0x3c, 0x77, 0x50, 0x97, 0x1f, 0x54, 0xdf, 0x0a, 0xfe, 0x42, 0x03, + 0x10, 0xc7, 0xaf, 0x81, 0x4c, 0xbf, 0x2a, 0x07, 0xdf, 0xfb, 0xd9, 0x06, 0xdf, 0xd8, 0x35, 0xd2, + 0xdc, 0x52, 0x29, 0xc0, 0x31, 0x15, 0x43, 0x09, 0x74, 0xb0, 0x0a, 0x56, 0x07, 0x29, 0xdd, 0x23, + 0xfc, 0xa1, 0x17, 0x9c, 0xe9, 0x79, 0x59, 0x0c, 0x5d, 0x21, 0xad, 0x56, 0x62, 0x7a, 0x34, 0xe6, + 0x01, 0x6f, 0x83, 0xe5, 0x81, 0xac, 0xe6, 0x05, 0x9c, 0xe9, 0x40, 0x62, 0x6c, 0x2a, 0x8c, 0xe5, + 0x4a, 0x44, 0x8b, 0x62, 0xd6, 0xf0, 0x16, 0x58, 0x1a, 0x4a, 0x0e, 0xab, 0xfa, 0xa2, 0xf4, 0xde, + 0x50, 0xde, 0x4b, 0x95, 0x11, 0x1d, 0x8a, 0x58, 0x46, 0x3c, 0x0f, 0x6b, 0xfb, 0xfa, 0x52, 0x8a, + 0xe7, 0x61, 0x6d, 0x1f, 0x45, 0x2c, 0xa1, 0x03, 0x8a, 0xfd, 0xef, 0x21, 0xf2, 0x35, 0x7e, 0xcc, + 0x2c, 0x6c, 0xcb, 0x73, 0x44, 0xdf, 0x94, 0x60, 0xbb, 0xdd, 0x4e, 0xb1, 0x58, 0x9d, 0x6c, 0x8a, + 0x2e, 0xc3, 0x82, 0x3f, 0x8e, 0xcf, 0x8d, 0x11, 0x9e, 0x6b, 0x92, 0xe7, 0xed, 0xf1, 0x99, 0x31, + 0x42, 0x90, 0xea, 0x2d, 0x1a, 0xa9, 0x3f, 0x4f, 0xd5, 0xec, 0xd4, 0xaf, 0xbc, 0xc8, 0x2d, 0x75, + 0xe2, 0xd1, 0x31, 0x7c, 0x85, 0x51, 0x33, 0x14, 0xa3, 0x84, 0x1e, 0xc8, 0x07, 0xfd, 0x43, 0x52, + 0x5f, 0x96, 0xfc, 0xb7, 0x33, 0x9e, 0xde, 0x29, 0x67, 0xb2, 0xb9, 0xa6, 0xa8, 0xf3, 0x03, 0x0b, + 0x34, 0xe4, 0x80, 0xbf, 0xd6, 0x00, 0x64, 0xa1, 0xef, 0xdb, 0xc4, 0x21, 0x2e, 0xc7, 0x76, 0x6f, + 0xdd, 0xd4, 0x57, 0x24, 0xf5, 0x9d, 0x8c, 0xa9, 0x8f, 0xf9, 0xc7, 0x63, 0x18, 0x7c, 0x4f, 0xe3, + 0xa6, 0x28, 0x81, 0x1e, 0xb6, 0xc0, 0x7c, 0x93, 0xc9, 0xdf, 0xfa, 0xaa, 0x8c, 0xe4, 0x47, 0xd9, + 0x22, 0x49, 0xfe, 0x4b, 0xc7, 0x5c, 0x51, 0xf4, 0xf3, 0x4a, 0x8f, 0xfa, 0xe8, 0xf0, 0x0b, 0xb0, + 0x19, 0x10, 0xdc, 0xb8, 0xef, 0xda, 0x6d, 0xe4, 0x79, 0xfc, 0x80, 0xda, 0x84, 0xb5, 0x19, 0x27, + 0x8e, 0xbe, 0x26, 0xbb, 0x69, 0x70, 0xe3, 0x45, 0x89, 0x56, 0x28, 0xc5, 0x1b, 0x16, 0xc1, 0xac, + 0x58, 0xe9, 0x99, 0x0e, 0xe5, 0x14, 0xcb, 0x8b, 0x35, 0x4a, 0xd4, 0x9b, 0xa1, 0x9e, 0x7c, 0x64, + 0xd7, 0x5f, 0x4f, 0xdb, 0xf5, 0xe1, 0x87, 0x60, 0x85, 0x11, 0xcb, 0xf2, 0x1c, 0xbf, 0x16, 0x78, + 0x4d, 0x01, 0xae, 0x6f, 0x48, 0xe3, 0xf5, 0x6e, 0xa7, 0xb8, 0x52, 0x8f, 0xaa, 0x50, 0xdc, 0x16, + 0x1e, 0x81, 0x0d, 0x35, 0xaa, 0x8e, 0x5d, 0x86, 0x9b, 0xa4, 0xde, 0x66, 0x16, 0xb7, 0x99, 0xae, + 0x4b, 0x0c, 0xbd, 0xdb, 0x29, 0x6e, 0x54, 0x12, 0xf4, 0x28, 0xd1, 0x0b, 0x7e, 0x04, 0x56, 0x9b, + 0x5e, 0x70, 0x42, 0x1b, 0x0d, 0xe2, 0xf6, 0x91, 0xde, 0x92, 0x48, 0x1b, 0x62, 0xbc, 0x1d, 0xc4, + 0x74, 0x68, 0xcc, 0xda, 0xf8, 0xb7, 0x06, 0x0a, 0xe9, 0xeb, 0xc9, 0x1b, 0x58, 0x8b, 0x49, 0x74, + 0x2d, 0xfe, 0x28, 0xeb, 0x1f, 0x24, 0x69, 0x21, 0xa7, 0x6c, 0xc8, 0xbf, 0x9d, 0x06, 0xdf, 0x79, + 0x81, 0x7f, 0x55, 0xe0, 0xdf, 0x34, 0xb0, 0xe7, 0x67, 0xb8, 0xd2, 0xa9, 0x8a, 0xbc, 0xca, 0x5b, + 0xf2, 0x77, 0x55, 0x02, 0x99, 0xae, 0x94, 0x28, 0x53, 0x94, 0xe2, 0x9e, 0xeb, 0x62, 0x87, 0xc4, + 0xef, 0xb9, 
0xf7, 0xb0, 0x43, 0x90, 0xd4, 0x18, 0x7f, 0xd6, 0xc0, 0x37, 0x2f, 0x9d, 0x19, 0xd0, + 0x8c, 0x6c, 0xdb, 0xa5, 0xd8, 0xb6, 0x5d, 0x48, 0x07, 0x78, 0xed, 0x7f, 0xc9, 0x9a, 0x9f, 0x3e, + 0x7e, 0x56, 0x98, 0x7a, 0xf2, 0xac, 0x30, 0xf5, 0xf4, 0x59, 0x61, 0xea, 0x51, 0xb7, 0xa0, 0x3d, + 0xee, 0x16, 0xb4, 0x27, 0xdd, 0x82, 0xf6, 0xb4, 0x5b, 0xd0, 0xfe, 0xd9, 0x2d, 0x68, 0xbf, 0xfa, + 0x57, 0x61, 0xea, 0xcb, 0xbd, 0x2c, 0xff, 0xde, 0xff, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x31, + 0x4b, 0x4e, 0xe4, 0x17, 0x00, 0x00, +} + +func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedFlexVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FSGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FSGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ranges) > 0 { + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IDRange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IDRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *PodSecurityPolicyReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicyReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicyReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + 
i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSecurityPolicyReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicyReviewSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicyReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ServiceAccountNames) > 0 { + for iNdEx := len(m.ServiceAccountNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ServiceAccountNames[iNdEx]) + copy(dAtA[i:], m.ServiceAccountNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountNames[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSecurityPolicyReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicyReviewStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicyReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AllowedServiceAccounts) > 0 { + for iNdEx := len(m.AllowedServiceAccounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedServiceAccounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PodSecurityPolicySelfSubjectReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicySelfSubjectReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicySelfSubjectReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSecurityPolicySelfSubjectReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicySelfSubjectReviewSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicySelfSubjectReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := 
m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSecurityPolicySubjectReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicySubjectReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicySubjectReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSecurityPolicySubjectReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicySubjectReviewSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicySubjectReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSecurityPolicySubjectReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicySubjectReviewStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicySubjectReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x12 + if m.AllowedBy != nil { + { + size, err := m.AllowedBy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RangeAllocation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *RangeAllocation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RangeAllocation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Data != nil { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Range) + copy(dAtA[i:], m.Range) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Range))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RangeAllocationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RangeAllocationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RangeAllocationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunAsUserStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.UIDRangeMax != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.UIDRangeMax)) + i-- + dAtA[i] = 0x20 + } + if m.UIDRangeMin != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.UIDRangeMin)) + i-- + dAtA[i] = 0x18 + } + if m.UID != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.UID)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SELinuxContextStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SELinuxContextStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SELinuxContextStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SELinuxOptions != nil { + { + size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i 
-= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SecurityContextConstraints) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecurityContextConstraints) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecurityContextConstraints) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ForbiddenSysctls) > 0 { + for iNdEx := len(m.ForbiddenSysctls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ForbiddenSysctls[iNdEx]) + copy(dAtA[i:], m.ForbiddenSysctls[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ForbiddenSysctls[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + } + if len(m.AllowedUnsafeSysctls) > 0 { + for iNdEx := len(m.AllowedUnsafeSysctls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedUnsafeSysctls[iNdEx]) + copy(dAtA[i:], m.AllowedUnsafeSysctls[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedUnsafeSysctls[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + } + if m.AllowPrivilegeEscalation != nil { + i-- + if *m.AllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb8 + } + if m.DefaultAllowPrivilegeEscalation != nil { + i-- + if *m.DefaultAllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb0 + } + if len(m.AllowedFlexVolumes) > 0 { + for iNdEx := len(m.AllowedFlexVolumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedFlexVolumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + } + if len(m.SeccompProfiles) > 0 { + for iNdEx := len(m.SeccompProfiles) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SeccompProfiles[iNdEx]) + copy(dAtA[i:], m.SeccompProfiles[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SeccompProfiles[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + } + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + } + if len(m.Users) > 0 { + for iNdEx := len(m.Users) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Users[iNdEx]) + copy(dAtA[i:], m.Users[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Users[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + } + i-- + if m.ReadOnlyRootFilesystem { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + { + size, err := m.FSGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + { + size, err := m.SupplementalGroups.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + { + size, err := m.RunAsUser.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- 
+ dAtA[i] = 0x72 + { + size, err := m.SELinuxContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + i-- + if m.AllowHostIPC { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + i-- + if m.AllowHostPID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + i-- + if m.AllowHostPorts { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + i-- + if m.AllowHostNetwork { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Volumes[iNdEx]) + copy(dAtA[i:], m.Volumes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + i-- + if m.AllowHostDirVolumePlugin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + if len(m.AllowedCapabilities) > 0 { + for iNdEx := len(m.AllowedCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedCapabilities[iNdEx]) + copy(dAtA[i:], m.AllowedCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.RequiredDropCapabilities) > 0 { + for iNdEx := len(m.RequiredDropCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RequiredDropCapabilities[iNdEx]) + copy(dAtA[i:], m.RequiredDropCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequiredDropCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.DefaultAddCapabilities) > 0 { + for iNdEx := len(m.DefaultAddCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DefaultAddCapabilities[iNdEx]) + copy(dAtA[i:], m.DefaultAddCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DefaultAddCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i-- + if m.AllowPrivilegedContainer { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + if m.Priority != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SecurityContextConstraintsList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecurityContextConstraintsList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecurityContextConstraintsList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceAccountPodSecurityPolicyReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceAccountPodSecurityPolicyReviewStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceAccountPodSecurityPolicyReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + { + size, err := m.PodSecurityPolicySubjectReviewStatus.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SupplementalGroupsStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ranges) > 0 { + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AllowedFlexVolume) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *FSGroupStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IDRange) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Min)) + n += 1 + sovGenerated(uint64(m.Max)) + return n +} + +func (m *PodSecurityPolicyReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodSecurityPolicyReviewSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ServiceAccountNames) > 0 { + for _, s := range m.ServiceAccountNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodSecurityPolicyReviewStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.AllowedServiceAccounts) > 0 { + for _, e := range m.AllowedServiceAccounts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodSecurityPolicySelfSubjectReview) Size() (n int) { 
+ if m == nil { + return 0 + } + var l int + _ = l + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodSecurityPolicySelfSubjectReviewSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodSecurityPolicySubjectReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodSecurityPolicySubjectReviewSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodSecurityPolicySubjectReviewStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AllowedBy != nil { + l = m.AllowedBy.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RangeAllocation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Range) + n += 1 + l + sovGenerated(uint64(l)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RangeAllocationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RunAsUserStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.UID != nil { + n += 1 + sovGenerated(uint64(*m.UID)) + } + if m.UIDRangeMin != nil { + n += 1 + sovGenerated(uint64(*m.UIDRangeMin)) + } + if m.UIDRangeMax != nil { + n += 1 + sovGenerated(uint64(*m.UIDRangeMax)) + } + return n +} + +func (m *SELinuxContextStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SecurityContextConstraints) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Priority != nil { + n += 1 + sovGenerated(uint64(*m.Priority)) + } + n += 2 + if len(m.DefaultAddCapabilities) > 0 { + for _, s := range m.DefaultAddCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.RequiredDropCapabilities) > 0 { + for _, s := range m.RequiredDropCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.AllowedCapabilities) > 0 { + for _, s := range m.AllowedCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + if len(m.Volumes) > 0 { + for _, s := range m.Volumes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + n += 2 + n += 2 + n += 2 + l = m.SELinuxContext.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + l = m.RunAsUser.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.SupplementalGroups.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.FSGroup.Size() + n += 2 + l + sovGenerated(uint64(l)) + n += 3 + if len(m.Users) > 0 { + for _, s := range m.Users { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.SeccompProfiles) > 0 { + for _, s := range m.SeccompProfiles { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.AllowedFlexVolumes) > 0 { + for _, e := range m.AllowedFlexVolumes { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultAllowPrivilegeEscalation != nil { + n += 3 + } + if m.AllowPrivilegeEscalation != nil { + n += 3 + } + if len(m.AllowedUnsafeSysctls) > 0 { + for _, s := range m.AllowedUnsafeSysctls { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.ForbiddenSysctls) > 0 { + for _, s := range m.ForbiddenSysctls { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SecurityContextConstraintsList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceAccountPodSecurityPolicyReviewStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.PodSecurityPolicySubjectReviewStatus.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SupplementalGroupsStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AllowedFlexVolume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AllowedFlexVolume{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `}`, + }, "") + return s +} +func (this *FSGroupStrategyOptions) String() string { + if this == nil { + return "nil" + } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" + s := strings.Join([]string{`&FSGroupStrategyOptions{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, + `}`, + }, "") + return s +} +func (this *IDRange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IDRange{`, + `Min:` + fmt.Sprintf("%v", this.Min) + `,`, + `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicyReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityPolicyReview{`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicyReviewSpec", "PodSecurityPolicyReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + 
strings.Replace(strings.Replace(this.Status.String(), "PodSecurityPolicyReviewStatus", "PodSecurityPolicyReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicyReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityPolicyReviewSpec{`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `ServiceAccountNames:` + fmt.Sprintf("%v", this.ServiceAccountNames) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicyReviewStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForAllowedServiceAccounts := "[]ServiceAccountPodSecurityPolicyReviewStatus{" + for _, f := range this.AllowedServiceAccounts { + repeatedStringForAllowedServiceAccounts += strings.Replace(strings.Replace(f.String(), "ServiceAccountPodSecurityPolicyReviewStatus", "ServiceAccountPodSecurityPolicyReviewStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedServiceAccounts += "}" + s := strings.Join([]string{`&PodSecurityPolicyReviewStatus{`, + `AllowedServiceAccounts:` + repeatedStringForAllowedServiceAccounts + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicySelfSubjectReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityPolicySelfSubjectReview{`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySelfSubjectReviewSpec", "PodSecurityPolicySelfSubjectReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodSecurityPolicySubjectReviewStatus", "PodSecurityPolicySubjectReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicySelfSubjectReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityPolicySelfSubjectReviewSpec{`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicySubjectReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityPolicySubjectReview{`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySubjectReviewSpec", "PodSecurityPolicySubjectReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodSecurityPolicySubjectReviewStatus", "PodSecurityPolicySubjectReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicySubjectReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityPolicySubjectReviewSpec{`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicySubjectReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityPolicySubjectReviewStatus{`, + `AllowedBy:` + strings.Replace(fmt.Sprintf("%v", this.AllowedBy), "ObjectReference", "v1.ObjectReference", 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Template:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RangeAllocation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RangeAllocation{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Range:` + fmt.Sprintf("%v", this.Range) + `,`, + `Data:` + valueToStringGenerated(this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *RangeAllocationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]RangeAllocation{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RangeAllocation", "RangeAllocation", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&RangeAllocationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *RunAsUserStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RunAsUserStrategyOptions{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `UID:` + valueToStringGenerated(this.UID) + `,`, + `UIDRangeMin:` + valueToStringGenerated(this.UIDRangeMin) + `,`, + `UIDRangeMax:` + valueToStringGenerated(this.UIDRangeMax) + `,`, + `}`, + }, "") + return s +} +func (this *SELinuxContextStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SELinuxContextStrategyOptions{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "v1.SELinuxOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SecurityContextConstraints) String() string { + if this == nil { + return "nil" + } + repeatedStringForAllowedFlexVolumes := "[]AllowedFlexVolume{" + for _, f := range this.AllowedFlexVolumes { + repeatedStringForAllowedFlexVolumes += strings.Replace(strings.Replace(f.String(), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedFlexVolumes += "}" + s := strings.Join([]string{`&SecurityContextConstraints{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v11.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Priority:` + valueToStringGenerated(this.Priority) + `,`, + `AllowPrivilegedContainer:` + fmt.Sprintf("%v", this.AllowPrivilegedContainer) + `,`, + `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, + `RequiredDropCapabilities:` + fmt.Sprintf("%v", this.RequiredDropCapabilities) + `,`, + `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, + `AllowHostDirVolumePlugin:` + fmt.Sprintf("%v", this.AllowHostDirVolumePlugin) + `,`, + `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, + `AllowHostNetwork:` + fmt.Sprintf("%v", this.AllowHostNetwork) + `,`, + `AllowHostPorts:` + fmt.Sprintf("%v", this.AllowHostPorts) + `,`, + `AllowHostPID:` + fmt.Sprintf("%v", this.AllowHostPID) + `,`, + `AllowHostIPC:` + fmt.Sprintf("%v", this.AllowHostIPC) + `,`, + `SELinuxContext:` + strings.Replace(strings.Replace(this.SELinuxContext.String(), "SELinuxContextStrategyOptions", "SELinuxContextStrategyOptions", 1), `&`, ``, 1) + `,`, + 
`RunAsUser:` + strings.Replace(strings.Replace(this.RunAsUser.String(), "RunAsUserStrategyOptions", "RunAsUserStrategyOptions", 1), `&`, ``, 1) + `,`, + `SupplementalGroups:` + strings.Replace(strings.Replace(this.SupplementalGroups.String(), "SupplementalGroupsStrategyOptions", "SupplementalGroupsStrategyOptions", 1), `&`, ``, 1) + `,`, + `FSGroup:` + strings.Replace(strings.Replace(this.FSGroup.String(), "FSGroupStrategyOptions", "FSGroupStrategyOptions", 1), `&`, ``, 1) + `,`, + `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, + `Users:` + fmt.Sprintf("%v", this.Users) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `SeccompProfiles:` + fmt.Sprintf("%v", this.SeccompProfiles) + `,`, + `AllowedFlexVolumes:` + repeatedStringForAllowedFlexVolumes + `,`, + `DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, + `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, + `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, + `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, + `}`, + }, "") + return s +} +func (this *SecurityContextConstraintsList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]SecurityContextConstraints{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "SecurityContextConstraints", "SecurityContextConstraints", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&SecurityContextConstraintsList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v11.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountPodSecurityPolicyReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceAccountPodSecurityPolicyReviewStatus{`, + `PodSecurityPolicySubjectReviewStatus:` + strings.Replace(strings.Replace(this.PodSecurityPolicySubjectReviewStatus.String(), "PodSecurityPolicySubjectReviewStatus", "PodSecurityPolicySubjectReviewStatus", 1), `&`, ``, 1) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *SupplementalGroupsStrategyOptions) String() string { + if this == nil { + return "nil" + } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" + s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: AllowedFlexVolume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllowedFlexVolume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FSGroupStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FSGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = FSGroupStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + 
return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IDRange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IDRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IDRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + m.Min = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Min |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + m.Max = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Max |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicyReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicyReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicyReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicyReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicyReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicyReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceAccountNames = append(m.ServiceAccountNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicyReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicyReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicyReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedServiceAccounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedServiceAccounts = append(m.AllowedServiceAccounts, ServiceAccountPodSecurityPolicyReviewStatus{}) + if err := m.AllowedServiceAccounts[len(m.AllowedServiceAccounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicySelfSubjectReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicySelfSubjectReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicySelfSubjectReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicySelfSubjectReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicySelfSubjectReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicySelfSubjectReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicySubjectReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicySubjectReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicySubjectReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicySubjectReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicySubjectReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicySubjectReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicySubjectReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicySubjectReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicySubjectReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedBy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AllowedBy == nil { + m.AllowedBy = &v1.ObjectReference{} + } + if err := m.AllowedBy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RangeAllocation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RangeAllocation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RangeAllocation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Range = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RangeAllocationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RangeAllocationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RangeAllocationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RangeAllocation{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunAsUserStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunAsUserStrategyOptions: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = RunAsUserStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UID = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UIDRangeMin", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UIDRangeMin = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UIDRangeMax", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UIDRangeMax = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SELinuxContextStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SELinuxContextStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SELinuxContextStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex 
> l { + return io.ErrUnexpectedEOF + } + m.Type = SELinuxContextStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SELinuxOptions == nil { + m.SELinuxOptions = &v1.SELinuxOptions{} + } + if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecurityContextConstraints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecurityContextConstraints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecurityContextConstraints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Priority = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegedContainer", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowPrivilegedContainer = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
DefaultAddCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DefaultAddCapabilities = append(m.DefaultAddCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDropCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequiredDropCapabilities = append(m.RequiredDropCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedCapabilities = append(m.AllowedCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowHostDirVolumePlugin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowHostDirVolumePlugin = bool(v != 0) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, FSType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowHostNetwork", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowHostNetwork = bool(v != 0) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowHostPorts", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowHostPorts = bool(v != 0) + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowHostPID", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowHostPID = bool(v != 0) + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowHostIPC", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowHostIPC = bool(v != 0) + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SELinuxContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RunAsUser.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SupplementalGroups.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FSGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnlyRootFilesystem = bool(v != 0) + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Users = append(m.Users, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SeccompProfiles", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SeccompProfiles = append(m.SeccompProfiles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedFlexVolumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedFlexVolumes = append(m.AllowedFlexVolumes, AllowedFlexVolume{}) + if err := m.AllowedFlexVolumes[len(m.AllowedFlexVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 22: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultAllowPrivilegeEscalation", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.DefaultAllowPrivilegeEscalation = &b + case 23: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegeEscalation", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AllowPrivilegeEscalation = &b + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedUnsafeSysctls", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedUnsafeSysctls = append(m.AllowedUnsafeSysctls, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 25: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ForbiddenSysctls", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ForbiddenSysctls = append(m.ForbiddenSysctls, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecurityContextConstraintsList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 
0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecurityContextConstraintsList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecurityContextConstraintsList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, SecurityContextConstraints{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccountPodSecurityPolicyReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccountPodSecurityPolicyReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccountPodSecurityPolicyReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSecurityPolicySubjectReviewStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.PodSecurityPolicySubjectReviewStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = SupplementalGroupsStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) 
> l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/openshift/api/security/v1/generated.proto b/vendor/github.com/openshift/api/security/v1/generated.proto new file mode 100644 index 000000000..d842079a0 --- /dev/null +++ b/vendor/github.com/openshift/api/security/v1/generated.proto @@ -0,0 +1,380 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package github.com.openshift.api.security.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/openshift/api/security/v1"; + +// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. +message AllowedFlexVolume { + // Driver is the name of the Flexvolume driver. + optional string driver = 1; +} + +// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. +message FSGroupStrategyOptions { + // Type is the strategy that will dictate what FSGroup is used in the SecurityContext. + optional string type = 1; + + // Ranges are the allowed ranges of fs groups. If you would like to force a single + // fs group then supply a single range with the same start and end. + repeated IDRange ranges = 2; +} + +// IDRange provides a min/max of an allowed range of IDs. +// TODO: this could be reused for UIDs. +message IDRange { + // Min is the start of the range, inclusive. + optional int64 min = 1; + + // Max is the end of the range, inclusive. + optional int64 max = 2; +} + +// PodSecurityPolicyReview checks which service accounts (not users, since that would be cluster-wide) can create the `PodTemplateSpec` in question. 
+//
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+message PodSecurityPolicyReview {
+  // spec is the PodSecurityPolicy to check.
+  optional PodSecurityPolicyReviewSpec spec = 1;
+
+  // status represents the current information/status for the PodSecurityPolicyReview.
+  optional PodSecurityPolicyReviewStatus status = 2;
+}
+
+// PodSecurityPolicyReviewSpec defines the specification for PodSecurityPolicyReview.
+message PodSecurityPolicyReviewSpec {
+  // template is the PodTemplateSpec to check. The template.spec.serviceAccountName field is used
+  // if serviceAccountNames is empty, unless the template.spec.serviceAccountName is empty,
+  // in which case "default" is used.
+  // If serviceAccountNames is specified, template.spec.serviceAccountName is ignored.
+  optional k8s.io.api.core.v1.PodTemplateSpec template = 1;
+
+  // serviceAccountNames is an optional set of ServiceAccounts to run the check with.
+  // If serviceAccountNames is empty, the template.spec.serviceAccountName is used,
+  // unless it's empty, in which case "default" is used instead.
+  // If serviceAccountNames is specified, template.spec.serviceAccountName is ignored.
+  repeated string serviceAccountNames = 2;
+}
+
+// PodSecurityPolicyReviewStatus represents the status of PodSecurityPolicyReview.
+message PodSecurityPolicyReviewStatus {
+  // allowedServiceAccounts returns the list of service accounts in *this* namespace that have the power to create the PodTemplateSpec.
+  repeated ServiceAccountPodSecurityPolicyReviewStatus allowedServiceAccounts = 1;
+}
+
+// PodSecurityPolicySelfSubjectReview checks whether this user/SA tuple can create the PodTemplateSpec.
+//
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+message PodSecurityPolicySelfSubjectReview {
+  // spec defines the specification for the PodSecurityPolicySelfSubjectReview.
+  optional PodSecurityPolicySelfSubjectReviewSpec spec = 1;
+
+  // status represents the current information/status for the PodSecurityPolicySelfSubjectReview.
+  optional PodSecurityPolicySubjectReviewStatus status = 2;
+}
+
+// PodSecurityPolicySelfSubjectReviewSpec contains the specification for PodSecurityPolicySelfSubjectReview.
+message PodSecurityPolicySelfSubjectReviewSpec {
+  // template is the PodTemplateSpec to check.
+  optional k8s.io.api.core.v1.PodTemplateSpec template = 1;
+}
+
+// PodSecurityPolicySubjectReview checks whether a particular user/SA tuple can create the PodTemplateSpec.
+//
+// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=2
+message PodSecurityPolicySubjectReview {
+  // spec defines the specification for the PodSecurityPolicySubjectReview.
+  optional PodSecurityPolicySubjectReviewSpec spec = 1;
+
+  // status represents the current information/status for the PodSecurityPolicySubjectReview.
+  optional PodSecurityPolicySubjectReviewStatus status = 2;
+}
+
+// PodSecurityPolicySubjectReviewSpec defines the specification for PodSecurityPolicySubjectReview.
+message PodSecurityPolicySubjectReviewSpec {
+  // template is the PodTemplateSpec to check. If template.spec.serviceAccountName is empty it will not be defaulted.
+  // If it is non-empty, it will be checked.
+  optional k8s.io.api.core.v1.PodTemplateSpec template = 1;
+
+  // user is the user you're testing for.
+  // If you specify "user" but not "group", then it is interpreted as "What if user were not a member of any groups?".
+  // If user and groups are empty, then the check is performed using *only* the serviceAccountName in the template.
+  optional string user = 2;
+
+  // groups is the groups you're testing for.
+  repeated string groups = 3;
+}
+
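For readers following the new API surface, the review types above can be exercised from Go roughly as follows. This is a minimal sketch, not part of the patch: it assumes the matching Marshal methods generated earlier in this vendored file, and the image name and service account names are placeholders.

package main

import (
	"fmt"

	securityv1 "github.com/openshift/api/security/v1"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	review := securityv1.PodSecurityPolicyReview{
		Spec: securityv1.PodSecurityPolicyReviewSpec{
			// template holds the PodTemplateSpec to check.
			Template: corev1.PodTemplateSpec{
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{Name: "app", Image: "registry.example/app:latest"}},
				},
			},
			// If empty, template.spec.serviceAccountName (or "default") is used instead.
			ServiceAccountNames: []string{"builder", "default"},
		},
	}

	// Round-trip through the generated protobuf code: Marshal produces the
	// wire-format bytes that the Unmarshal shown in this diff consumes.
	data, err := review.Marshal()
	if err != nil {
		panic(err)
	}
	var decoded securityv1.PodSecurityPolicyReview
	if err := decoded.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Spec.ServiceAccountNames)
}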
+// PodSecurityPolicySubjectReviewStatus contains information/status for PodSecurityPolicySubjectReview.
+message PodSecurityPolicySubjectReviewStatus {
+  // allowedBy is a reference to the rule that allows the PodTemplateSpec.
+  // A rule can be a SecurityContextConstraint or a PodSecurityPolicy.
+  // A `nil` value indicates that it was denied.
+  optional k8s.io.api.core.v1.ObjectReference allowedBy = 1;
+
+  // A machine-readable description of why this operation is in the
+  // "Failure" status. If this value is empty there
+  // is no information available.
+  optional string reason = 2;
+
+  // template is the PodTemplateSpec after the defaulting is applied.
+  optional k8s.io.api.core.v1.PodTemplateSpec template = 3;
+}
+
+// RangeAllocation is used so we can easily expose a RangeAllocation typed for security groups.
+//
+// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.
+// +openshift:compatibility-gen:level=4
+message RangeAllocation {
+  // metadata is the standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // range is a string representing a unique label for a range of uids, "1000000000-2000000000/10000".
+  optional string range = 2;
+
+  // data is a byte array representing the serialized state of a range allocation. It is a bitmap
+  // with each bit set to one to represent that a range is taken.
+  optional bytes data = 3;
+}
+
+// RangeAllocationList is a list of RangeAllocation objects.
+//
+// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +openshift:compatibility-gen:level=1
+message RangeAllocationList {
+  // metadata is the standard list's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // List of RangeAllocations.
+  repeated RangeAllocation items = 2;
+}
+
+// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.
+message RunAsUserStrategyOptions {
+  // Type is the strategy that will dictate what RunAsUser is used in the SecurityContext.
+  optional string type = 1;
+
+  // UID is the user id that containers must run as. Required for the MustRunAs strategy if not using
+  // namespace/service account allocated uids.
+  optional int64 uid = 2;
+
+  // UIDRangeMin defines the min value for a strategy that allocates by range.
+  optional int64 uidRangeMin = 3;
+
+  // UIDRangeMax defines the max value for a strategy that allocates by range.
+  optional int64 uidRangeMax = 4;
+}
+
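Every generated Unmarshal method in this vendored file repeats the same two primitives: a base-128 varint read, and splitting the decoded key into a field number (wire >> 3) and a wire type (wire & 0x7). A minimal standalone sketch of that decoding logic, for orientation only (the error values mirror the generated sentinels, nothing here is part of the patch):

package main

import (
	"errors"
	"fmt"
)

// readVarint decodes a protobuf varint: 7 payload bits per byte,
// least-significant group first, with the high bit set on every
// byte except the last.
func readVarint(data []byte, index int) (value uint64, next int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow") // mirrors ErrIntOverflowGenerated
		}
		if index >= len(data) {
			return 0, 0, errors.New("unexpected EOF") // mirrors io.ErrUnexpectedEOF
		}
		b := data[index]
		index++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: final byte of the varint
			return value, index, nil
		}
	}
}

func main() {
	// 0x0A = (field number 1 << 3) | wire type 2 (length-delimited),
	// followed by a one-byte length and the payload "foo".
	msg := []byte{0x0A, 0x03, 'f', 'o', 'o'}
	key, next, _ := readVarint(msg, 0)
	length, next, _ := readVarint(msg, next)
	fmt.Printf("field=%d wiretype=%d payload=%q\n", key>>3, key&0x7, msg[next:next+int(length)])
}

The generated code inlines this loop separately for each field instead of calling a helper, which is why the pattern recurs so many times in the hunk above.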
+// SELinuxContextStrategyOptions defines the strategy type and any options used to create the strategy.
+message SELinuxContextStrategyOptions {
+  // Type is the strategy that will dictate what SELinux context is used in the SecurityContext.
+  optional string type = 1;
+
+  // seLinuxOptions required to run as; required for MustRunAs.
+  optional k8s.io.api.core.v1.SELinuxOptions seLinuxOptions = 2;
+}
+
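Note how the proto2 `optional` scalar fields in these strategy messages surface as pointers in the generated Go structs; a nil pointer is how the Unmarshal code above distinguishes an absent field from an explicit zero. A small sketch (the strategy constant follows this package's types and is an assumption, as it is not shown in this hunk):

package main

import (
	"fmt"

	securityv1 "github.com/openshift/api/security/v1"
)

func main() {
	min, max := int64(1000100000), int64(1000109999)
	runAsUser := securityv1.RunAsUserStrategyOptions{
		Type:        securityv1.RunAsUserStrategyMustRunAsRange,
		UIDRangeMin: &min, // optional int64 uidRangeMin = 3
		UIDRangeMax: &max, // optional int64 uidRangeMax = 4
		// UID is left nil: field 2 simply never appears on the wire.
	}
	if runAsUser.UID == nil {
		fmt.Println("uid unset; range:", *runAsUser.UIDRangeMin, "-", *runAsUser.UIDRangeMax)
	}
}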
+ // +nullable + repeated string defaultAddCapabilities = 4; + + // RequiredDropCapabilities are the capabilities that will be dropped from the container. These + // are required to be dropped and cannot be added. + // +nullable + repeated string requiredDropCapabilities = 5; + + // AllowedCapabilities is a list of capabilities that can be requested to add to the container. + // Capabilities in this field may be added at the pod author's discretion. + // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. + // To allow all capabilities you may use '*'. + // +nullable + repeated string allowedCapabilities = 6; + + // AllowHostDirVolumePlugin determines if the policy allows containers to use the HostDir volume plugin + // +k8s:conversion-gen=false + optional bool allowHostDirVolumePlugin = 7; + + // Volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names + // of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use "*". + // To allow no volumes, set to ["none"]. + // +nullable + repeated string volumes = 8; + + // AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all + // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes + // is allowed in the "Volumes" field. + // +optional + // +nullable + repeated AllowedFlexVolume allowedFlexVolumes = 21; + + // AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec. + optional bool allowHostNetwork = 9; + + // AllowHostPorts determines if the policy allows host ports in the containers. + optional bool allowHostPorts = 10; + + // AllowHostPID determines if the policy allows host pid in the containers. + optional bool allowHostPID = 11; + + // AllowHostIPC determines if the policy allows host ipc in the containers. + optional bool allowHostIPC = 12; + + // DefaultAllowPrivilegeEscalation controls the default setting for whether a + // process can gain more privileges than its parent process. + // +optional + // +nullable + optional bool defaultAllowPrivilegeEscalation = 22; + + // AllowPrivilegeEscalation determines if a pod can request to allow + // privilege escalation. If unspecified, defaults to true. + // +optional + // +nullable + optional bool allowPrivilegeEscalation = 23; + + // SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext. + // +nullable + optional SELinuxContextStrategyOptions seLinuxContext = 13; + + // RunAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext. + // +nullable + optional RunAsUserStrategyOptions runAsUser = 14; + + // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + // +nullable + optional SupplementalGroupsStrategyOptions supplementalGroups = 15; + + // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext. + // +nullable + optional FSGroupStrategyOptions fsGroup = 16; + + // ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file + // system. If the container specifically requests to run with a non-read only root file system + // the SCC should deny the pod. + // If set to false the container may run with a read only root file system if it wishes but it + // will not be forced to.
+ optional bool readOnlyRootFilesystem = 17; + + // The users who have permission to use this security context constraints + // +optional + // +nullable + repeated string users = 18; + + // The groups that have permission to use this security context constraints + // +optional + // +nullable + repeated string groups = 19; + + // SeccompProfiles lists the allowed profiles that may be set for the pod or + // container's seccomp annotations. An unset (nil) or empty value means that no profiles may + // be specified by the pod or container. The wildcard '*' may be used to allow all profiles. When + // used to generate a value for a pod the first non-wildcard profile will be used as + // the default. + // +nullable + repeated string seccompProfiles = 20; + + // AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. + // Each entry is either a plain sysctl name or ends in "*" in which case it is considered + // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. + // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. + // + // Examples: + // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. + // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. + // +optional + // +nullable + repeated string allowedUnsafeSysctls = 24; + + // ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. + // Each entry is either a plain sysctl name or ends in "*" in which case it is considered + // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. + // + // Examples: + // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. + // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. + // +optional + // +nullable + repeated string forbiddenSysctls = 25; +} + +// SecurityContextConstraintsList is a list of SecurityContextConstraints objects +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message SecurityContextConstraintsList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of security context constraints. + repeated SecurityContextConstraints items = 2; +} + +// ServiceAccountPodSecurityPolicyReviewStatus represents ServiceAccount name and related review status +message ServiceAccountPodSecurityPolicyReviewStatus { + optional PodSecurityPolicySubjectReviewStatus podSecurityPolicySubjectReviewStatus = 1; + + // name contains the allowed and the denied ServiceAccount name + optional string name = 2; +} + +// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. +message SupplementalGroupsStrategyOptions { + // Type is the strategy that will dictate what supplemental groups are used in the SecurityContext. + optional string type = 1; + + // Ranges are the allowed ranges of supplemental groups. If you would like to force a single + // supplemental group then supply a single range with the same start and end.
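+ // For example, a single range of {min: 5555, max: 5555} forces every pod to run with supplemental group 5555.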
+ repeated IDRange ranges = 2; +} + diff --git a/vendor/github.com/openshift/api/security/v1/legacy.go b/vendor/github.com/openshift/api/security/v1/legacy.go new file mode 100644 index 000000000..34f609a07 --- /dev/null +++ b/vendor/github.com/openshift/api/security/v1/legacy.go @@ -0,0 +1,25 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &SecurityContextConstraints{}, + &SecurityContextConstraintsList{}, + &PodSecurityPolicySubjectReview{}, + &PodSecurityPolicySelfSubjectReview{}, + &PodSecurityPolicyReview{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) + return nil +} diff --git a/vendor/github.com/openshift/api/security/v1/register.go b/vendor/github.com/openshift/api/security/v1/register.go new file mode 100644 index 000000000..431c3b539 --- /dev/null +++ b/vendor/github.com/openshift/api/security/v1/register.go @@ -0,0 +1,44 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "security.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. 
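+// A minimal usage sketch (editor's illustration, not generated code): callers
+// typically register these types into a scheme via Install before building
+// clients, e.g.
+//
+//	scheme := runtime.NewScheme()
+//	if err := Install(scheme); err != nil {
+//		panic(err)
+//	}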
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &SecurityContextConstraints{}, + &SecurityContextConstraintsList{}, + &PodSecurityPolicySubjectReview{}, + &PodSecurityPolicySelfSubjectReview{}, + &PodSecurityPolicyReview{}, + &RangeAllocation{}, + &RangeAllocationList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/security/v1/stable.securitycontextconstraints.testsuite.yaml b/vendor/github.com/openshift/api/security/v1/stable.securitycontextconstraints.testsuite.yaml new file mode 100644 index 000000000..d663b94c2 --- /dev/null +++ b/vendor/github.com/openshift/api/security/v1/stable.securitycontextconstraints.testsuite.yaml @@ -0,0 +1,36 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] SecurityContextConstraints" +crd: 0000_03_security-openshift_01_scc.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal SecurityContextConstraints + initial: | + apiVersion: security.openshift.io/v1 + kind: SecurityContextConstraints + allowHostDirVolumePlugin: false + allowHostIPC: false + allowHostNetwork: false + allowHostPID: false + allowHostPorts: false + allowPrivilegedContainer: false + allowedCapabilities: [] + defaultAddCapabilities: [] + priority: 0 + readOnlyRootFilesystem: false + requiredDropCapabilities: [] + volumes: [] + expected: | + apiVersion: security.openshift.io/v1 + kind: SecurityContextConstraints + allowHostDirVolumePlugin: false + allowHostIPC: false + allowHostNetwork: false + allowHostPID: false + allowHostPorts: false + allowPrivilegedContainer: false + allowedCapabilities: [] + defaultAddCapabilities: [] + priority: 0 + readOnlyRootFilesystem: false + requiredDropCapabilities: [] + volumes: [] diff --git a/vendor/github.com/openshift/api/security/v1/types.go b/vendor/github.com/openshift/api/security/v1/types.go new file mode 100644 index 000000000..3e208210c --- /dev/null +++ b/vendor/github.com/openshift/api/security/v1/types.go @@ -0,0 +1,468 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// AllowAllCapabilities can be used as a value for the +// SecurityContextConstraints.AllowedCapabilities field and means that any +// capabilities are allowed to be requested. +var AllowAllCapabilities corev1.Capability = "*" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SecurityContextConstraints governs the ability to make requests that affect the SecurityContext +// that will be applied to a container. +// For historical reasons SCC was exposed under the core Kubernetes API group. +// That exposure is deprecated and will be removed in a future release - users +// should instead use the security.openshift.io group to manage +// SecurityContextConstraints. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).
+// +kubebuilder:printcolumn:name="Priv",type=string,JSONPath=`.allowPrivilegedContainer`,description="Determines if a container can request to be run as privileged" +// +kubebuilder:printcolumn:name="Caps",type=string,JSONPath=`.allowedCapabilities`,description="A list of capabilities that can be requested to add to the container" +// +kubebuilder:printcolumn:name="SELinux",type=string,JSONPath=`.seLinuxContext.type`,description="Strategy that will dictate what labels will be set in the SecurityContext" +// +kubebuilder:printcolumn:name="RunAsUser",type=string,JSONPath=`.runAsUser.type`,description="Strategy that will dictate what RunAsUser is used in the SecurityContext" +// +kubebuilder:printcolumn:name="FSGroup",type=string,JSONPath=`.fsGroup.type`,description="Strategy that will dictate what fs group is used by the SecurityContext" +// +kubebuilder:printcolumn:name="SupGroup",type=string,JSONPath=`.supplementalGroups.type`,description="Strategy that will dictate what supplemental groups are used by the SecurityContext" +// +kubebuilder:printcolumn:name="Priority",type=string,JSONPath=`.priority`,description="Sort order of SCCs" +// +kubebuilder:printcolumn:name="ReadOnlyRootFS",type=string,JSONPath=`.readOnlyRootFilesystem`,description="Force containers to run with a read only root file system" +// +kubebuilder:printcolumn:name="Volumes",type=string,JSONPath=`.volumes`,description="White list of allowed volume plugins" +// +kubebuilder:singular=securitycontextconstraint +// +openshift:compatibility-gen:level=1 +type SecurityContextConstraints struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Priority influences the sort order of SCCs when evaluating which SCCs to try first for + // a given pod request based on access in the Users and Groups fields. The higher the int, the + // higher priority. An unset value is considered a 0 priority. If scores + // for multiple SCCs are equal they will be sorted from most restrictive to + // least restrictive. If both priorities and restrictions are equal the + // SCCs will be sorted by name. + // +nullable + Priority *int32 `json:"priority" protobuf:"varint,2,opt,name=priority"` + + // AllowPrivilegedContainer determines if a container can request to be run as privileged. + AllowPrivilegedContainer bool `json:"allowPrivilegedContainer" protobuf:"varint,3,opt,name=allowPrivilegedContainer"` + // DefaultAddCapabilities is the default set of capabilities that will be added to the container + // unless the pod spec specifically drops the capability. You may not list a capability in both + // DefaultAddCapabilities and RequiredDropCapabilities. + // +nullable + DefaultAddCapabilities []corev1.Capability `json:"defaultAddCapabilities" protobuf:"bytes,4,rep,name=defaultAddCapabilities,casttype=Capability"` + // RequiredDropCapabilities are the capabilities that will be dropped from the container. These + // are required to be dropped and cannot be added. + // +nullable + RequiredDropCapabilities []corev1.Capability `json:"requiredDropCapabilities" protobuf:"bytes,5,rep,name=requiredDropCapabilities,casttype=Capability"` + // AllowedCapabilities is a list of capabilities that can be requested to add to the container. + // Capabilities in this field may be added at the pod author's discretion.
+ // You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. + // To allow all capabilities you may use '*'. + // +nullable + AllowedCapabilities []corev1.Capability `json:"allowedCapabilities" protobuf:"bytes,6,rep,name=allowedCapabilities,casttype=Capability"` + // AllowHostDirVolumePlugin determines if the policy allows containers to use the HostDir volume plugin + // +k8s:conversion-gen=false + AllowHostDirVolumePlugin bool `json:"allowHostDirVolumePlugin" protobuf:"varint,7,opt,name=allowHostDirVolumePlugin"` + // Volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names + // of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use "*". + // To allow no volumes, set to ["none"]. + // +nullable + Volumes []FSType `json:"volumes" protobuf:"bytes,8,rep,name=volumes,casttype=FSType"` + // AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all + // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes + // is allowed in the "Volumes" field. + // +optional + // +nullable + AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,21,rep,name=allowedFlexVolumes"` + // AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec. + AllowHostNetwork bool `json:"allowHostNetwork" protobuf:"varint,9,opt,name=allowHostNetwork"` + // AllowHostPorts determines if the policy allows host ports in the containers. + AllowHostPorts bool `json:"allowHostPorts" protobuf:"varint,10,opt,name=allowHostPorts"` + // AllowHostPID determines if the policy allows host pid in the containers. + AllowHostPID bool `json:"allowHostPID" protobuf:"varint,11,opt,name=allowHostPID"` + // AllowHostIPC determines if the policy allows host ipc in the containers. + AllowHostIPC bool `json:"allowHostIPC" protobuf:"varint,12,opt,name=allowHostIPC"` + // DefaultAllowPrivilegeEscalation controls the default setting for whether a + // process can gain more privileges than its parent process. + // +optional + // +nullable + DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,22,rep,name=defaultAllowPrivilegeEscalation"` + // AllowPrivilegeEscalation determines if a pod can request to allow + // privilege escalation. If unspecified, defaults to true. + // +optional + // +nullable + AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,23,rep,name=allowPrivilegeEscalation"` + // SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext. + // +nullable + SELinuxContext SELinuxContextStrategyOptions `json:"seLinuxContext,omitempty" protobuf:"bytes,13,opt,name=seLinuxContext"` + // RunAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext. + // +nullable + RunAsUser RunAsUserStrategyOptions `json:"runAsUser,omitempty" protobuf:"bytes,14,opt,name=runAsUser"` + // SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + // +nullable + SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups,omitempty" protobuf:"bytes,15,opt,name=supplementalGroups"` + // FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.
+ // +nullable + FSGroup FSGroupStrategyOptions `json:"fsGroup,omitempty" protobuf:"bytes,16,opt,name=fsGroup"` + // ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file + // system. If the container specifically requests to run with a non-read only root file system + // the SCC should deny the pod. + // If set to false the container may run with a read only root file system if it wishes but it + // will not be forced to. + ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem" protobuf:"varint,17,opt,name=readOnlyRootFilesystem"` + + // The users who have permission to use this security context constraints + // +optional + // +nullable + Users []string `json:"users" protobuf:"bytes,18,rep,name=users"` + // The groups that have permission to use this security context constraints + // +optional + // +nullable + Groups []string `json:"groups" protobuf:"bytes,19,rep,name=groups"` + + // SeccompProfiles lists the allowed profiles that may be set for the pod or + // container's seccomp annotations. An unset (nil) or empty value means that no profiles may + // be specified by the pod or container. The wildcard '*' may be used to allow all profiles. When + // used to generate a value for a pod the first non-wildcard profile will be used as + // the default. + // +nullable + SeccompProfiles []string `json:"seccompProfiles,omitempty" protobuf:"bytes,20,opt,name=seccompProfiles"` + + // AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. + // Each entry is either a plain sysctl name or ends in "*" in which case it is considered + // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. + // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. + // + // Examples: + // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. + // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. + // +optional + // +nullable + AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" protobuf:"bytes,24,rep,name=allowedUnsafeSysctls"` + // ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. + // Each entry is either a plain sysctl name or ends in "*" in which case it is considered + // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. + // + // Examples: + // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. + // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. + // +optional + // +nullable + ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty" protobuf:"bytes,25,rep,name=forbiddenSysctls"` +} + +// FSType gives strong typing to different file systems that are used by volumes.
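+// Editor's note: the special values "*" (FSTypeAll) and "none" (FSTypeNone) below correspond to the allow-all and allow-none cases described on the Volumes field above.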
+type FSType string + +var ( + FSTypeAzureFile FSType = "azureFile" + FSTypeAzureDisk FSType = "azureDisk" + FSTypeFlocker FSType = "flocker" + FSTypeFlexVolume FSType = "flexVolume" + FSTypeHostPath FSType = "hostPath" + FSTypeEmptyDir FSType = "emptyDir" + FSTypeGCEPersistentDisk FSType = "gcePersistentDisk" + FSTypeAWSElasticBlockStore FSType = "awsElasticBlockStore" + FSTypeGitRepo FSType = "gitRepo" + FSTypeSecret FSType = "secret" + FSTypeNFS FSType = "nfs" + FSTypeISCSI FSType = "iscsi" + FSTypeGlusterfs FSType = "glusterfs" + FSTypePersistentVolumeClaim FSType = "persistentVolumeClaim" + FSTypeRBD FSType = "rbd" + FSTypeCinder FSType = "cinder" + FSTypeCephFS FSType = "cephFS" + FSTypeDownwardAPI FSType = "downwardAPI" + FSTypeFC FSType = "fc" + FSTypeConfigMap FSType = "configMap" + FSTypeVsphereVolume FSType = "vsphere" + FSTypeQuobyte FSType = "quobyte" + FSTypePhotonPersistentDisk FSType = "photonPersistentDisk" + FSProjected FSType = "projected" + FSPortworxVolume FSType = "portworxVolume" + FSScaleIO FSType = "scaleIO" + FSStorageOS FSType = "storageOS" + FSTypeCSI FSType = "csi" + FSTypeEphemeral FSType = "ephemeral" + FSTypeAll FSType = "*" + FSTypeNone FSType = "none" +) + +// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. +type AllowedFlexVolume struct { + // Driver is the name of the Flexvolume driver. + Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` +} + +// SELinuxContextStrategyOptions defines the strategy type and any options used to create the strategy. +type SELinuxContextStrategyOptions struct { + // Type is the strategy that will dictate what SELinux context is used in the SecurityContext. + Type SELinuxContextStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=SELinuxContextStrategyType"` + // seLinuxOptions required to run as; required for MustRunAs + SELinuxOptions *corev1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"` +} + +// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. +type RunAsUserStrategyOptions struct { + // Type is the strategy that will dictate what RunAsUser is used in the SecurityContext. + Type RunAsUserStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=RunAsUserStrategyType"` + // UID is the user id that containers must run as. Required for the MustRunAs strategy if not using + // namespace/service account allocated uids. + UID *int64 `json:"uid,omitempty" protobuf:"varint,2,opt,name=uid"` + // UIDRangeMin defines the min value for a strategy that allocates by range. + UIDRangeMin *int64 `json:"uidRangeMin,omitempty" protobuf:"varint,3,opt,name=uidRangeMin"` + // UIDRangeMax defines the max value for a strategy that allocates by range. + UIDRangeMax *int64 `json:"uidRangeMax,omitempty" protobuf:"varint,4,opt,name=uidRangeMax"` +} + +// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. +type FSGroupStrategyOptions struct { + // Type is the strategy that will dictate what FSGroup is used in the SecurityContext. + Type FSGroupStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=FSGroupStrategyType"` + // Ranges are the allowed ranges of fs groups. If you would like to force a single + // fs group then supply a single range with the same start and end. 
+ Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + +// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. +type SupplementalGroupsStrategyOptions struct { + // Type is the strategy that will dictate what supplemental groups are used in the SecurityContext. + Type SupplementalGroupsStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=SupplementalGroupsStrategyType"` + // Ranges are the allowed ranges of supplemental groups. If you would like to force a single + // supplemental group then supply a single range with the same start and end. + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + +// IDRange provides a min/max of an allowed range of IDs. +// TODO: this could be reused for UIDs. +type IDRange struct { + // Min is the start of the range, inclusive. + Min int64 `json:"min,omitempty" protobuf:"varint,1,opt,name=min"` + // Max is the end of the range, inclusive. + Max int64 `json:"max,omitempty" protobuf:"varint,2,opt,name=max"` +} + +// SELinuxContextStrategyType denotes strategy types for generating SELinux options for a +// SecurityContext +type SELinuxContextStrategyType string + +// RunAsUserStrategyType denotes strategy types for generating RunAsUser values for a +// SecurityContext +type RunAsUserStrategyType string + +// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental +// groups for a SecurityContext. +type SupplementalGroupsStrategyType string + +// FSGroupStrategyType denotes strategy types for generating FSGroup values for a +// SecurityContext +type FSGroupStrategyType string + +const ( + // container must have SELinux labels of X applied. + SELinuxStrategyMustRunAs SELinuxContextStrategyType = "MustRunAs" + // container may make requests for any SELinux context labels. + SELinuxStrategyRunAsAny SELinuxContextStrategyType = "RunAsAny" + + // container must run as a particular uid. + RunAsUserStrategyMustRunAs RunAsUserStrategyType = "MustRunAs" + // container must run as a uid within a particular range. + RunAsUserStrategyMustRunAsRange RunAsUserStrategyType = "MustRunAsRange" + // container must run as a non-root uid + RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategyType = "MustRunAsNonRoot" + // container may make requests for any uid. + RunAsUserStrategyRunAsAny RunAsUserStrategyType = "RunAsAny" + + // container must have FSGroup of X applied. + FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" + // container may make requests for any FSGroup labels. + FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny" + + // container must run as a particular gid. + SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" + // container may make requests for any gid. + SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SecurityContextConstraintsList is a list of SecurityContextConstraints objects +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type SecurityContextConstraintsList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of security context constraints. + Items []SecurityContextConstraints `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodSecurityPolicySubjectReview checks whether a particular user/SA tuple can create the PodTemplateSpec. +// +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type PodSecurityPolicySubjectReview struct { + metav1.TypeMeta `json:",inline"` + + // spec defines specification for the PodSecurityPolicySubjectReview. + Spec PodSecurityPolicySubjectReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"` + + // status represents the current information/status for the PodSecurityPolicySubjectReview. + Status PodSecurityPolicySubjectReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` +} + +// PodSecurityPolicySubjectReviewSpec defines specification for PodSecurityPolicySubjectReview +type PodSecurityPolicySubjectReviewSpec struct { + // template is the PodTemplateSpec to check. If template.spec.serviceAccountName is empty, it will not be defaulted. + // If it's non-empty, it will be checked. + Template corev1.PodTemplateSpec `json:"template" protobuf:"bytes,1,opt,name=template"` + + // user is the user you're testing for. + // If you specify "user" but not "group", then it is interpreted as "What if user were not a member of any groups?". + // If user and groups are empty, then the check is performed using *only* the serviceAccountName in the template. + User string `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` + + // groups is the groups you're testing for. + Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"` +} + +// PodSecurityPolicySubjectReviewStatus contains information/status for PodSecurityPolicySubjectReview. +type PodSecurityPolicySubjectReviewStatus struct { + // allowedBy is a reference to the rule that allows the PodTemplateSpec. + // A rule can be a SecurityContextConstraint or a PodSecurityPolicy. + // A `nil` value indicates that it was denied. + AllowedBy *corev1.ObjectReference `json:"allowedBy,omitempty" protobuf:"bytes,1,opt,name=allowedBy"` + + // A machine-readable description of why this operation is in the + // "Failure" status. If this value is empty there + // is no information available. + Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` + + // template is the PodTemplateSpec after the defaulting is applied. + Template corev1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` +} + +// +genclient +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodSecurityPolicySelfSubjectReview checks whether this user/SA tuple can create the PodTemplateSpec +// +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type PodSecurityPolicySelfSubjectReview struct { + metav1.TypeMeta `json:",inline"` + + // spec defines specification for the PodSecurityPolicySelfSubjectReview.
+ Spec PodSecurityPolicySelfSubjectReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"` + + // status represents the current information/status for the PodSecurityPolicySelfSubjectReview. + Status PodSecurityPolicySubjectReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` +} + +// PodSecurityPolicySelfSubjectReviewSpec contains specification for PodSecurityPolicySelfSubjectReview. +type PodSecurityPolicySelfSubjectReviewSpec struct { + // template is the PodTemplateSpec to check. + Template corev1.PodTemplateSpec `json:"template" protobuf:"bytes,1,opt,name=template"` +} + +// +genclient +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodSecurityPolicyReview checks which service accounts (not users, since that would be cluster-wide) can create the `PodTemplateSpec` in question. +// +// Compatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=2 +type PodSecurityPolicyReview struct { + metav1.TypeMeta `json:",inline"` + + // spec is the PodSecurityPolicy to check. + Spec PodSecurityPolicyReviewSpec `json:"spec" protobuf:"bytes,1,opt,name=spec"` + + // status represents the current information/status for the PodSecurityPolicyReview. + Status PodSecurityPolicyReviewStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` +} + +// PodSecurityPolicyReviewSpec defines specification for PodSecurityPolicyReview +type PodSecurityPolicyReviewSpec struct { + // template is the PodTemplateSpec to check. The template.spec.serviceAccountName field is used + // if serviceAccountNames is empty, unless the template.spec.serviceAccountName is empty, + // in which case "default" is used. + // If serviceAccountNames is specified, template.spec.serviceAccountName is ignored. + Template corev1.PodTemplateSpec `json:"template" protobuf:"bytes,1,opt,name=template"` + + // serviceAccountNames is an optional set of ServiceAccounts to run the check with. + // If serviceAccountNames is empty, the template.spec.serviceAccountName is used, + // unless it's empty, in which case "default" is used instead. + // If serviceAccountNames is specified, template.spec.serviceAccountName is ignored. + ServiceAccountNames []string `json:"serviceAccountNames,omitempty" protobuf:"bytes,2,rep,name=serviceAccountNames"` // TODO: find a way to express 'all service accounts' +} + +// PodSecurityPolicyReviewStatus represents the status of PodSecurityPolicyReview. +type PodSecurityPolicyReviewStatus struct { + // allowedServiceAccounts returns the list of service accounts in *this* namespace that have the power to create the PodTemplateSpec. 
+ AllowedServiceAccounts []ServiceAccountPodSecurityPolicyReviewStatus `json:"allowedServiceAccounts" protobuf:"bytes,1,rep,name=allowedServiceAccounts"` +} + +// ServiceAccountPodSecurityPolicyReviewStatus represents ServiceAccount name and related review status +type ServiceAccountPodSecurityPolicyReviewStatus struct { + PodSecurityPolicySubjectReviewStatus `json:",inline" protobuf:"bytes,1,opt,name=podSecurityPolicySubjectReviewStatus"` + + // name contains the allowed and the denied ServiceAccount name + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RangeAllocation is used so we can easily expose a RangeAllocation typed for security group +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +type RangeAllocation struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // range is a string representing a unique label for a range of uids, "1000000000-2000000000/10000". + Range string `json:"range" protobuf:"bytes,2,opt,name=range"` + + // data is a byte array representing the serialized state of a range allocation. It is a bitmap + // with each bit set to one to represent a range is taken. + Data []byte `json:"data" protobuf:"bytes,3,opt,name=data"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RangeAllocationList is a list of RangeAllocations objects +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type RangeAllocationList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of RangeAllocations. + Items []RangeAllocation `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/github.com/openshift/api/security/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/security/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..26c88f7de --- /dev/null +++ b/vendor/github.com/openshift/api/security/v1/zz_generated.deepcopy.go @@ -0,0 +1,533 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. 
+func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { + if in == nil { + return nil + } + out := new(AllowedFlexVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FSGroupStrategyOptions) DeepCopyInto(out *FSGroupStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSGroupStrategyOptions. +func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions { + if in == nil { + return nil + } + out := new(FSGroupStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IDRange) DeepCopyInto(out *IDRange) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDRange. +func (in *IDRange) DeepCopy() *IDRange { + if in == nil { + return nil + } + out := new(IDRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSecurityPolicyReview) DeepCopyInto(out *PodSecurityPolicyReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyReview. +func (in *PodSecurityPolicyReview) DeepCopy() *PodSecurityPolicyReview { + if in == nil { + return nil + } + out := new(PodSecurityPolicyReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodSecurityPolicyReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSecurityPolicyReviewSpec) DeepCopyInto(out *PodSecurityPolicyReviewSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) + if in.ServiceAccountNames != nil { + in, out := &in.ServiceAccountNames, &out.ServiceAccountNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyReviewSpec. +func (in *PodSecurityPolicyReviewSpec) DeepCopy() *PodSecurityPolicyReviewSpec { + if in == nil { + return nil + } + out := new(PodSecurityPolicyReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSecurityPolicyReviewStatus) DeepCopyInto(out *PodSecurityPolicyReviewStatus) { + *out = *in + if in.AllowedServiceAccounts != nil { + in, out := &in.AllowedServiceAccounts, &out.AllowedServiceAccounts + *out = make([]ServiceAccountPodSecurityPolicyReviewStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyReviewStatus. 
+func (in *PodSecurityPolicyReviewStatus) DeepCopy() *PodSecurityPolicyReviewStatus { + if in == nil { + return nil + } + out := new(PodSecurityPolicyReviewStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSecurityPolicySelfSubjectReview) DeepCopyInto(out *PodSecurityPolicySelfSubjectReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySelfSubjectReview. +func (in *PodSecurityPolicySelfSubjectReview) DeepCopy() *PodSecurityPolicySelfSubjectReview { + if in == nil { + return nil + } + out := new(PodSecurityPolicySelfSubjectReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodSecurityPolicySelfSubjectReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSecurityPolicySelfSubjectReviewSpec) DeepCopyInto(out *PodSecurityPolicySelfSubjectReviewSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySelfSubjectReviewSpec. +func (in *PodSecurityPolicySelfSubjectReviewSpec) DeepCopy() *PodSecurityPolicySelfSubjectReviewSpec { + if in == nil { + return nil + } + out := new(PodSecurityPolicySelfSubjectReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSecurityPolicySubjectReview) DeepCopyInto(out *PodSecurityPolicySubjectReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySubjectReview. +func (in *PodSecurityPolicySubjectReview) DeepCopy() *PodSecurityPolicySubjectReview { + if in == nil { + return nil + } + out := new(PodSecurityPolicySubjectReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodSecurityPolicySubjectReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSecurityPolicySubjectReviewSpec) DeepCopyInto(out *PodSecurityPolicySubjectReviewSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySubjectReviewSpec. 
+func (in *PodSecurityPolicySubjectReviewSpec) DeepCopy() *PodSecurityPolicySubjectReviewSpec { + if in == nil { + return nil + } + out := new(PodSecurityPolicySubjectReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSecurityPolicySubjectReviewStatus) DeepCopyInto(out *PodSecurityPolicySubjectReviewStatus) { + *out = *in + if in.AllowedBy != nil { + in, out := &in.AllowedBy, &out.AllowedBy + *out = new(corev1.ObjectReference) + **out = **in + } + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySubjectReviewStatus. +func (in *PodSecurityPolicySubjectReviewStatus) DeepCopy() *PodSecurityPolicySubjectReviewStatus { + if in == nil { + return nil + } + out := new(PodSecurityPolicySubjectReviewStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RangeAllocation) DeepCopyInto(out *RangeAllocation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeAllocation. +func (in *RangeAllocation) DeepCopy() *RangeAllocation { + if in == nil { + return nil + } + out := new(RangeAllocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RangeAllocation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RangeAllocationList) DeepCopyInto(out *RangeAllocationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RangeAllocation, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeAllocationList. +func (in *RangeAllocationList) DeepCopy() *RangeAllocationList { + if in == nil { + return nil + } + out := new(RangeAllocationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RangeAllocationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { + *out = *in + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(int64) + **out = **in + } + if in.UIDRangeMin != nil { + in, out := &in.UIDRangeMin, &out.UIDRangeMin + *out = new(int64) + **out = **in + } + if in.UIDRangeMax != nil { + in, out := &in.UIDRangeMax, &out.UIDRangeMax + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsUserStrategyOptions. 
+func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { + if in == nil { + return nil + } + out := new(RunAsUserStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SELinuxContextStrategyOptions) DeepCopyInto(out *SELinuxContextStrategyOptions) { + *out = *in + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(corev1.SELinuxOptions) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxContextStrategyOptions. +func (in *SELinuxContextStrategyOptions) DeepCopy() *SELinuxContextStrategyOptions { + if in == nil { + return nil + } + out := new(SELinuxContextStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityContextConstraints) DeepCopyInto(out *SecurityContextConstraints) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(int32) + **out = **in + } + if in.DefaultAddCapabilities != nil { + in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities + *out = make([]corev1.Capability, len(*in)) + copy(*out, *in) + } + if in.RequiredDropCapabilities != nil { + in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities + *out = make([]corev1.Capability, len(*in)) + copy(*out, *in) + } + if in.AllowedCapabilities != nil { + in, out := &in.AllowedCapabilities, &out.AllowedCapabilities + *out = make([]corev1.Capability, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]FSType, len(*in)) + copy(*out, *in) + } + if in.AllowedFlexVolumes != nil { + in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes + *out = make([]AllowedFlexVolume, len(*in)) + copy(*out, *in) + } + if in.DefaultAllowPrivilegeEscalation != nil { + in, out := &in.DefaultAllowPrivilegeEscalation, &out.DefaultAllowPrivilegeEscalation + *out = new(bool) + **out = **in + } + if in.AllowPrivilegeEscalation != nil { + in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation + *out = new(bool) + **out = **in + } + in.SELinuxContext.DeepCopyInto(&out.SELinuxContext) + in.RunAsUser.DeepCopyInto(&out.RunAsUser) + in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) + in.FSGroup.DeepCopyInto(&out.FSGroup) + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SeccompProfiles != nil { + in, out := &in.SeccompProfiles, &out.SeccompProfiles + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AllowedUnsafeSysctls != nil { + in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ForbiddenSysctls != nil { + in, out := &in.ForbiddenSysctls, &out.ForbiddenSysctls + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityContextConstraints. 
+func (in *SecurityContextConstraints) DeepCopy() *SecurityContextConstraints { + if in == nil { + return nil + } + out := new(SecurityContextConstraints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecurityContextConstraints) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityContextConstraintsList) DeepCopyInto(out *SecurityContextConstraintsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SecurityContextConstraints, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityContextConstraintsList. +func (in *SecurityContextConstraintsList) DeepCopy() *SecurityContextConstraintsList { + if in == nil { + return nil + } + out := new(SecurityContextConstraintsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecurityContextConstraintsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountPodSecurityPolicyReviewStatus) DeepCopyInto(out *ServiceAccountPodSecurityPolicyReviewStatus) { + *out = *in + in.PodSecurityPolicySubjectReviewStatus.DeepCopyInto(&out.PodSecurityPolicySubjectReviewStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountPodSecurityPolicyReviewStatus. +func (in *ServiceAccountPodSecurityPolicyReviewStatus) DeepCopy() *ServiceAccountPodSecurityPolicyReviewStatus { + if in == nil { + return nil + } + out := new(ServiceAccountPodSecurityPolicyReviewStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupplementalGroupsStrategyOptions. +func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrategyOptions { + if in == nil { + return nil + } + out := new(SupplementalGroupsStrategyOptions) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..a72b8ecf0 --- /dev/null +++ b/vendor/github.com/openshift/api/security/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,228 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. 
Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_AllowedFlexVolume = map[string]string{ + "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used.", + "driver": "Driver is the name of the Flexvolume driver.", +} + +func (AllowedFlexVolume) SwaggerDoc() map[string]string { + return map_AllowedFlexVolume +} + +var map_FSGroupStrategyOptions = map[string]string{ + "": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy.", + "type": "Type is the strategy that will dictate what FSGroup is used in the SecurityContext.", + "ranges": "Ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end.", +} + +func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { + return map_FSGroupStrategyOptions +} + +var map_IDRange = map[string]string{ + "": "IDRange provides a min/max of an allowed range of IDs.", + "min": "Min is the start of the range, inclusive.", + "max": "Max is the end of the range, inclusive.", +} + +func (IDRange) SwaggerDoc() map[string]string { + return map_IDRange +} + +var map_PodSecurityPolicyReview = map[string]string{ + "": "PodSecurityPolicyReview checks which service accounts (not users, since that would be cluster-wide) can create the `PodTemplateSpec` in question.\n\nCompatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "spec": "spec is the PodSecurityPolicy to check.", + "status": "status represents the current information/status for the PodSecurityPolicyReview.", +} + +func (PodSecurityPolicyReview) SwaggerDoc() map[string]string { + return map_PodSecurityPolicyReview +} + +var map_PodSecurityPolicyReviewSpec = map[string]string{ + "": "PodSecurityPolicyReviewSpec defines specification for PodSecurityPolicyReview", + "template": "template is the PodTemplateSpec to check. The template.spec.serviceAccountName field is used if serviceAccountNames is empty, unless the template.spec.serviceAccountName is empty, in which case \"default\" is used. If serviceAccountNames is specified, template.spec.serviceAccountName is ignored.", + "serviceAccountNames": "serviceAccountNames is an optional set of ServiceAccounts to run the check with. If serviceAccountNames is empty, the template.spec.serviceAccountName is used, unless it's empty, in which case \"default\" is used instead. 
If serviceAccountNames is specified, template.spec.serviceAccountName is ignored.", +} + +func (PodSecurityPolicyReviewSpec) SwaggerDoc() map[string]string { + return map_PodSecurityPolicyReviewSpec +} + +var map_PodSecurityPolicyReviewStatus = map[string]string{ + "": "PodSecurityPolicyReviewStatus represents the status of PodSecurityPolicyReview.", + "allowedServiceAccounts": "allowedServiceAccounts returns the list of service accounts in *this* namespace that have the power to create the PodTemplateSpec.", +} + +func (PodSecurityPolicyReviewStatus) SwaggerDoc() map[string]string { + return map_PodSecurityPolicyReviewStatus +} + +var map_PodSecurityPolicySelfSubjectReview = map[string]string{ + "": "PodSecurityPolicySelfSubjectReview checks whether this user/SA tuple can create the PodTemplateSpec\n\nCompatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "spec": "spec defines the specification for the PodSecurityPolicySelfSubjectReview.", + "status": "status represents the current information/status for the PodSecurityPolicySelfSubjectReview.", +} + +func (PodSecurityPolicySelfSubjectReview) SwaggerDoc() map[string]string { + return map_PodSecurityPolicySelfSubjectReview +} + +var map_PodSecurityPolicySelfSubjectReviewSpec = map[string]string{ + "": "PodSecurityPolicySelfSubjectReviewSpec contains specification for PodSecurityPolicySelfSubjectReview.", + "template": "template is the PodTemplateSpec to check.", +} + +func (PodSecurityPolicySelfSubjectReviewSpec) SwaggerDoc() map[string]string { + return map_PodSecurityPolicySelfSubjectReviewSpec +} + +var map_PodSecurityPolicySubjectReview = map[string]string{ + "": "PodSecurityPolicySubjectReview checks whether a particular user/SA tuple can create the PodTemplateSpec.\n\nCompatibility level 2: Stable within a major release for a minimum of 9 months or 3 minor releases (whichever is longer).", + "spec": "spec defines specification for the PodSecurityPolicySubjectReview.", + "status": "status represents the current information/status for the PodSecurityPolicySubjectReview.", +} + +func (PodSecurityPolicySubjectReview) SwaggerDoc() map[string]string { + return map_PodSecurityPolicySubjectReview +} + +var map_PodSecurityPolicySubjectReviewSpec = map[string]string{ + "": "PodSecurityPolicySubjectReviewSpec defines specification for PodSecurityPolicySubjectReview", + "template": "template is the PodTemplateSpec to check. If template.spec.serviceAccountName is empty it will not be defaulted. If it's non-empty, it will be checked.", + "user": "user is the user you're testing for. If you specify \"user\" but not \"group\", then it is interpreted as \"What if user were not a member of any groups?\". If user and groups are empty, then the check is performed using *only* the serviceAccountName in the template.", + "groups": "groups is the groups you're testing for.", +} + +func (PodSecurityPolicySubjectReviewSpec) SwaggerDoc() map[string]string { + return map_PodSecurityPolicySubjectReviewSpec +} + +var map_PodSecurityPolicySubjectReviewStatus = map[string]string{ + "": "PodSecurityPolicySubjectReviewStatus contains information/status for PodSecurityPolicySubjectReview.", + "allowedBy": "allowedBy is a reference to the rule that allows the PodTemplateSpec. A rule can be a SecurityContextConstraint or a PodSecurityPolicy. A `nil` value indicates that it was denied.", + "reason": "A machine-readable description of why this operation is in the \"Failure\" status.
If this value is empty there is no information available.", + "template": "template is the PodTemplateSpec after the defaulting is applied.", +} + +func (PodSecurityPolicySubjectReviewStatus) SwaggerDoc() map[string]string { + return map_PodSecurityPolicySubjectReviewStatus +} + +var map_RangeAllocation = map[string]string{ + "": "RangeAllocation is used so we can easily expose a RangeAllocation typed for security group\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "range": "range is a string representing a unique label for a range of uids, \"1000000000-2000000000/10000\".", + "data": "data is a byte array representing the serialized state of a range allocation. It is a bitmap with each bit set to one to represent a range is taken.", +} + +func (RangeAllocation) SwaggerDoc() map[string]string { + return map_RangeAllocation +} + +var map_RangeAllocationList = map[string]string{ + "": "RangeAllocationList is a list of RangeAllocations objects\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "List of RangeAllocations.", +} + +func (RangeAllocationList) SwaggerDoc() map[string]string { + return map_RangeAllocationList +} + +var map_RunAsUserStrategyOptions = map[string]string{ + "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.", + "type": "Type is the strategy that will dictate what RunAsUser is used in the SecurityContext.", + "uid": "UID is the user id that containers must run as. Required for the MustRunAs strategy if not using namespace/service account allocated uids.", + "uidRangeMin": "UIDRangeMin defines the min value for a strategy that allocates by range.", + "uidRangeMax": "UIDRangeMax defines the max value for a strategy that allocates by range.", +} + +func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { + return map_RunAsUserStrategyOptions +} + +var map_SELinuxContextStrategyOptions = map[string]string{ + "": "SELinuxContextStrategyOptions defines the strategy type and any options used to create the strategy.", + "type": "Type is the strategy that will dictate what SELinux context is used in the SecurityContext.", + "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs", +} + +func (SELinuxContextStrategyOptions) SwaggerDoc() map[string]string { + return map_SELinuxContextStrategyOptions +} + +var map_SecurityContextConstraints = map[string]string{ + "": "SecurityContextConstraints governs the ability to make requests that affect the SecurityContext that will be applied to a container. For historical reasons SCC was exposed under the core Kubernetes API group. That exposure is deprecated and will be removed in a future release - users should instead use the security.openshift.io group to manage SecurityContextConstraints.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "priority": "Priority influences the sort order of SCCs when evaluating which SCCs to try first for a given pod request based on access in the Users and Groups fields. The higher the int, the higher priority. An unset value is considered a 0 priority. If scores for multiple SCCs are equal they will be sorted from most restrictive to least restrictive. If both priorities and restrictions are equal the SCCs will be sorted by name.", + "allowPrivilegedContainer": "AllowPrivilegedContainer determines if a container can request to be run as privileged.", + "defaultAddCapabilities": "DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both DefaultAddCapabilities and RequiredDropCapabilities.", + "requiredDropCapabilities": "RequiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", + "allowedCapabilities": "AllowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities. To allow all capabilities you may use '*'.", + "allowHostDirVolumePlugin": "AllowHostDirVolumePlugin determines if the policy allows containers to use the HostDir volume plugin.", + "volumes": "Volumes is a white list of allowed volume plugins. FSType corresponds directly with the field names of a VolumeSource (azureFile, configMap, emptyDir). To allow all volumes you may use \"*\". To allow no volumes, set to [\"none\"].", + "allowedFlexVolumes": "AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"Volumes\" field.", + "allowHostNetwork": "AllowHostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", + "allowHostPorts": "AllowHostPorts determines if the policy allows host ports in the containers.", + "allowHostPID": "AllowHostPID determines if the policy allows host pid in the containers.", + "allowHostIPC": "AllowHostIPC determines if the policy allows host ipc in the containers.", + "defaultAllowPrivilegeEscalation": "DefaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", + "allowPrivilegeEscalation": "AllowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", + "seLinuxContext": "SELinuxContext is the strategy that will dictate what labels will be set in the SecurityContext.", + "runAsUser": "RunAsUser is the strategy that will dictate what RunAsUser is used in the SecurityContext.", + "supplementalGroups": "SupplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", + "fsGroup": "FSGroup is the strategy that will dictate what fs group is used by the SecurityContext.", + "readOnlyRootFilesystem": "ReadOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the SCC should deny the pod.
If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", + "users": "The users who have permission to use these security context constraints", + "groups": "The groups that have permission to use these security context constraints", + "seccompProfiles": "SeccompProfiles lists the allowed profiles that may be set for the pod or container's seccomp annotations. An unset (nil) or empty value means that no profiles may be specified by the pod or container.\tThe wildcard '*' may be used to allow all profiles. When used to generate a value for a pod the first non-wildcard profile will be used as the default.", + "allowedUnsafeSysctls": "AllowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", + "forbiddenSysctls": "ForbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", +} + +func (SecurityContextConstraints) SwaggerDoc() map[string]string { + return map_SecurityContextConstraints +} + +var map_SecurityContextConstraintsList = map[string]string{ + "": "SecurityContextConstraintsList is a list of SecurityContextConstraints objects\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "List of security context constraints.", +} + +func (SecurityContextConstraintsList) SwaggerDoc() map[string]string { + return map_SecurityContextConstraintsList +} + +var map_ServiceAccountPodSecurityPolicyReviewStatus = map[string]string{ + "": "ServiceAccountPodSecurityPolicyReviewStatus represents ServiceAccount name and related review status", + "name": "name contains the allowed and the denied ServiceAccount name", +} + +func (ServiceAccountPodSecurityPolicyReviewStatus) SwaggerDoc() map[string]string { + return map_ServiceAccountPodSecurityPolicyReviewStatus +} + +var map_SupplementalGroupsStrategyOptions = map[string]string{ + "": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.", + "type": "Type is the strategy that will dictate what supplemental groups are used in the SecurityContext.", + "ranges": "Ranges are the allowed ranges of supplemental groups.
If you would like to force a single supplemental group then supply a single range with the same start and end.", +} + +func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string { + return map_SupplementalGroupsStrategyOptions +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/servicecertsigner/.codegen.yaml b/vendor/github.com/openshift/api/servicecertsigner/.codegen.yaml new file mode 100644 index 000000000..ffa2c8d9b --- /dev/null +++ b/vendor/github.com/openshift/api/servicecertsigner/.codegen.yaml @@ -0,0 +1,2 @@ +swaggerdocs: + commentPolicy: Warn diff --git a/vendor/github.com/openshift/api/servicecertsigner/install.go b/vendor/github.com/openshift/api/servicecertsigner/install.go new file mode 100644 index 000000000..98d891d34 --- /dev/null +++ b/vendor/github.com/openshift/api/servicecertsigner/install.go @@ -0,0 +1,26 @@ +package servicecertsigner + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + servicecertsignerv1alpha1 "github.com/openshift/api/servicecertsigner/v1alpha1" +) + +const ( + GroupName = "servicecertsigner.config.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(servicecertsignerv1alpha1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/doc.go b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/doc.go new file mode 100644 index 000000000..6ce02bdb3 --- /dev/null +++ b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/doc.go @@ -0,0 +1,6 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=servicecertsigner.config.openshift.io +package v1alpha1 diff --git a/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/register.go b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/register.go new file mode 100644 index 000000000..19ef421b2 --- /dev/null +++ b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/register.go @@ -0,0 +1,40 @@ +package v1alpha1 + +import ( + configv1 "github.com/openshift/api/config/v1" + operatorsv1alpha1api "github.com/openshift/api/operator/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "servicecertsigner.config.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, configv1.Install, operatorsv1alpha1api.Install) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func addKnownTypes(scheme *runtime.Scheme) error { + 
scheme.AddKnownTypes(GroupVersion, + &ServiceCertSignerOperatorConfig{}, + &ServiceCertSignerOperatorConfigList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + + return nil +} diff --git a/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/types.go b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/types.go new file mode 100644 index 000000000..ebd8d75ef --- /dev/null +++ b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/types.go @@ -0,0 +1,53 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + operatorv1 "github.com/openshift/api/operator/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceCertSignerOperatorConfig provides information to configure an operator to manage the service cert signing controllers +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type ServiceCertSignerOperatorConfig struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata"` + + Spec ServiceCertSignerOperatorConfigSpec `json:"spec"` + Status ServiceCertSignerOperatorConfigStatus `json:"status"` +} + +type ServiceCertSignerOperatorConfigSpec struct { + operatorv1.OperatorSpec `json:",inline"` +} + +type ServiceCertSignerOperatorConfigStatus struct { + operatorv1.OperatorStatus `json:",inline"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceCertSignerOperatorConfigList is a collection of items +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +openshift:compatibility-gen:internal +type ServiceCertSignerOperatorConfigList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty"` + + // Items contains the items + Items []ServiceCertSignerOperatorConfig `json:"items"` +} diff --git a/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..2661c23aa --- /dev/null +++ b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,105 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceCertSignerOperatorConfig) DeepCopyInto(out *ServiceCertSignerOperatorConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCertSignerOperatorConfig. +func (in *ServiceCertSignerOperatorConfig) DeepCopy() *ServiceCertSignerOperatorConfig { + if in == nil { + return nil + } + out := new(ServiceCertSignerOperatorConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCertSignerOperatorConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCertSignerOperatorConfigList) DeepCopyInto(out *ServiceCertSignerOperatorConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceCertSignerOperatorConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCertSignerOperatorConfigList. +func (in *ServiceCertSignerOperatorConfigList) DeepCopy() *ServiceCertSignerOperatorConfigList { + if in == nil { + return nil + } + out := new(ServiceCertSignerOperatorConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceCertSignerOperatorConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCertSignerOperatorConfigSpec) DeepCopyInto(out *ServiceCertSignerOperatorConfigSpec) { + *out = *in + in.OperatorSpec.DeepCopyInto(&out.OperatorSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCertSignerOperatorConfigSpec. +func (in *ServiceCertSignerOperatorConfigSpec) DeepCopy() *ServiceCertSignerOperatorConfigSpec { + if in == nil { + return nil + } + out := new(ServiceCertSignerOperatorConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceCertSignerOperatorConfigStatus) DeepCopyInto(out *ServiceCertSignerOperatorConfigStatus) { + *out = *in + in.OperatorStatus.DeepCopyInto(&out.OperatorStatus) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCertSignerOperatorConfigStatus. 
+func (in *ServiceCertSignerOperatorConfigStatus) DeepCopy() *ServiceCertSignerOperatorConfigStatus { + if in == nil { + return nil + } + out := new(ServiceCertSignerOperatorConfigStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..13b3b7364 --- /dev/null +++ b/vendor/github.com/openshift/api/servicecertsigner/v1alpha1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,33 @@ +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_ServiceCertSignerOperatorConfig = map[string]string{ + "": "ServiceCertSignerOperatorConfig provides information to configure an operator to manage the service cert signing controllers\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (ServiceCertSignerOperatorConfig) SwaggerDoc() map[string]string { + return map_ServiceCertSignerOperatorConfig +} + +var map_ServiceCertSignerOperatorConfigList = map[string]string{ + "": "ServiceCertSignerOperatorConfigList is a collection of items\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items contains the items", +} + +func (ServiceCertSignerOperatorConfigList) SwaggerDoc() map[string]string { + return map_ServiceCertSignerOperatorConfigList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/sharedresource/.codegen.yaml b/vendor/github.com/openshift/api/sharedresource/.codegen.yaml new file mode 100644 index 000000000..ffa2c8d9b --- /dev/null +++ b/vendor/github.com/openshift/api/sharedresource/.codegen.yaml @@ -0,0 +1,2 @@ +swaggerdocs: + commentPolicy: Warn diff --git a/vendor/github.com/openshift/api/sharedresource/OWNERS b/vendor/github.com/openshift/api/sharedresource/OWNERS new file mode 100644 index 000000000..c89bc9387 --- /dev/null +++ b/vendor/github.com/openshift/api/sharedresource/OWNERS @@ -0,0 +1,5 @@ +reviewers: + - bparees + - gabemontero + - adambkaplan + - coreydaley diff --git a/vendor/github.com/openshift/api/sharedresource/install.go b/vendor/github.com/openshift/api/sharedresource/install.go new file mode 100644 index 000000000..40eae94a9 --- /dev/null +++ b/vendor/github.com/openshift/api/sharedresource/install.go @@ -0,0 +1,26 @@ +package sharedresource + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + v1alpha1 "github.com/openshift/api/sharedresource/v1alpha1" +) + +const ( + GroupName = "sharedresource.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(v1alpha1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/0000_10_sharedconfigmap.crd.yaml b/vendor/github.com/openshift/api/sharedresource/v1alpha1/0000_10_sharedconfigmap.crd.yaml new file mode 100644 index 000000000..e5ae3195d --- /dev/null +++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/0000_10_sharedconfigmap.crd.yaml @@ -0,0 +1,105 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/979 + description: Extension for sharing ConfigMaps across Namespaces + displayName: SharedConfigMap + name: sharedconfigmaps.sharedresource.openshift.io +spec: + group: sharedresource.openshift.io + names: + kind: SharedConfigMap + listKind: SharedConfigMapList + plural: sharedconfigmaps + singular: sharedconfigmap + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: "SharedConfigMap allows a ConfigMap to be shared across namespaces. Pods can mount the shared ConfigMap by adding a CSI volume to the pod specification using the \"csi.sharedresource.openshift.io\" CSI driver and a reference to the SharedConfigMap in the volume attributes: \n spec: volumes: - name: shared-configmap csi: driver: csi.sharedresource.openshift.io volumeAttributes: sharedConfigMap: my-share \n For the mount to be successful, the pod's service account must be granted permission to 'use' the named SharedConfigMap object within its namespace with an appropriate Role and RoleBinding. For compactness, here are example `oc` invocations for creating such Role and RoleBinding objects. 
\n `oc create role shared-resource-my-share --verb=use --resource=sharedconfigmaps.sharedresource.openshift.io --resource-name=my-share` `oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default` \n Shared resource objects, in this case ConfigMaps, have default permissions of list, get, and watch for system authenticated users. \n Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. These capabilities should not be used by applications needing long term support." + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired shared configmap + type: object + required: + - configMapRef + properties: + configMapRef: + description: configMapRef is a reference to the ConfigMap to share + type: object + required: + - name + - namespace + properties: + name: + description: name represents the name of the ConfigMap that is being referenced. + type: string + namespace: + description: namespace represents the namespace where the referenced ConfigMap is located. + type: string + description: + description: description is a user readable explanation of what the backing resource provides. + type: string + status: + description: status is the observed status of the shared configmap + type: object + properties: + conditions: + description: conditions represents any observations made on this particular shared resource by the underlying CSI driver or Share controller. + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. 
+ type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/0000_10_sharedsecret.crd.yaml b/vendor/github.com/openshift/api/sharedresource/v1alpha1/0000_10_sharedsecret.crd.yaml new file mode 100644 index 000000000..2e3d5d36e --- /dev/null +++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/0000_10_sharedsecret.crd.yaml @@ -0,0 +1,105 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + api-approved.openshift.io: https://github.com/openshift/api/pull/979 + description: Extension for sharing Secrets across Namespaces + displayName: SharedSecret + name: sharedsecrets.sharedresource.openshift.io +spec: + group: sharedresource.openshift.io + names: + kind: SharedSecret + listKind: SharedSecretList + plural: sharedsecrets + singular: sharedsecret + scope: Cluster + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: "SharedSecret allows a Secret to be shared across namespaces. Pods can mount the shared Secret by adding a CSI volume to the pod specification using the \"csi.sharedresource.openshift.io\" CSI driver and a reference to the SharedSecret in the volume attributes: \n spec: volumes: - name: shared-secret csi: driver: csi.sharedresource.openshift.io volumeAttributes: sharedSecret: my-share \n For the mount to be successful, the pod's service account must be granted permission to 'use' the named SharedSecret object within its namespace with an appropriate Role and RoleBinding. For compactness, here are example `oc` invocations for creating such Role and RoleBinding objects. \n `oc create role shared-resource-my-share --verb=use --resource=sharedsecrets.sharedresource.openshift.io --resource-name=my-share` `oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default` \n Shared resource objects, in this case Secrets, have default permissions of list, get, and watch for system authenticated users. 
\n Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. These capabilities should not be used by applications needing long term support." + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: spec is the specification of the desired shared secret + type: object + required: + - secretRef + properties: + description: + description: description is a user readable explanation of what the backing resource provides. + type: string + secretRef: + description: secretRef is a reference to the Secret to share + type: object + required: + - name + - namespace + properties: + name: + description: name represents the name of the Secret that is being referenced. + type: string + namespace: + description: namespace represents the namespace where the referenced Secret is located. + type: string + status: + description: status is the observed status of the shared secret + type: object + properties: + conditions: + description: conditions represents any observations made on this particular shared resource by the underlying CSI driver or Share controller. + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, \n type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. 
+ type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. + type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + served: true + storage: true + subresources: + status: {} diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/Makefile b/vendor/github.com/openshift/api/sharedresource/v1alpha1/Makefile new file mode 100644 index 000000000..330157e5b --- /dev/null +++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/Makefile @@ -0,0 +1,3 @@ +.PHONY: test +test: + make -C ../../tests test GINKGO_EXTRA_ARGS=--focus="sharedresource.openshift.io/v1alpha1" diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/doc.go b/vendor/github.com/openshift/api/sharedresource/v1alpha1/doc.go new file mode 100644 index 000000000..833dd7f12 --- /dev/null +++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/doc.go @@ -0,0 +1,7 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=sharedresource.openshift.io +// Package v1alpha1 is the v1alpha1 version of the API. +package v1alpha1 diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/register.go b/vendor/github.com/openshift/api/sharedresource/v1alpha1/register.go new file mode 100644 index 000000000..c390b46fc --- /dev/null +++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/register.go @@ -0,0 +1,53 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + Version = "v1alpha1" + GroupName = "sharedresource.openshift.io" +) + +var ( + scheme = runtime.NewScheme() + GroupVersion = schema.GroupVersion{Group: GroupName, Version: Version} + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // Install is a function which adds this version to a scheme + Install = SchemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = SchemeBuilder.AddToScheme +) + +func init() { + AddToScheme(scheme) +} + +// addKnownTypes adds the set of types defined in this package to the supplied scheme.
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &SharedConfigMap{}, + &SharedConfigMapList{}, + &SharedSecret{}, + &SharedSecretList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/stable.sharedconfigmap.testsuite.yaml b/vendor/github.com/openshift/api/sharedresource/v1alpha1/stable.sharedconfigmap.testsuite.yaml new file mode 100644 index 000000000..dc26d6aca --- /dev/null +++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/stable.sharedconfigmap.testsuite.yaml @@ -0,0 +1,20 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] SharedConfigMap" +crd: 0000_10_sharedconfigmap.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal SharedConfigMap + initial: | + apiVersion: sharedresource.openshift.io/v1alpha1 + kind: SharedConfigMap + spec: + configMapRef: + name: foo + namespace: foo + expected: | + apiVersion: sharedresource.openshift.io/v1alpha1 + kind: SharedConfigMap + spec: + configMapRef: + name: foo + namespace: foo diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/stable.sharedsecret.testsuite.yaml b/vendor/github.com/openshift/api/sharedresource/v1alpha1/stable.sharedsecret.testsuite.yaml new file mode 100644 index 000000000..14da75b1f --- /dev/null +++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/stable.sharedsecret.testsuite.yaml @@ -0,0 +1,20 @@ +apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this +name: "[Stable] SharedSecret" +crd: 0000_10_sharedsecret.crd.yaml +tests: + onCreate: + - name: Should be able to create a minimal SharedSecret + initial: | + apiVersion: sharedresource.openshift.io/v1alpha1 + kind: SharedSecret + spec: + secretRef: + name: foo + namespace: foo + expected: | + apiVersion: sharedresource.openshift.io/v1alpha1 + kind: SharedSecret + spec: + secretRef: + name: foo + namespace: foo diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_configmap.go b/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_configmap.go new file mode 100644 index 000000000..8ffd0b412 --- /dev/null +++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_configmap.go @@ -0,0 +1,93 @@ +package v1alpha1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SharedConfigMap allows a ConfigMap to be shared across namespaces. 
+// Pods can mount the shared ConfigMap by adding a CSI volume to the pod specification using the +// "csi.sharedresource.openshift.io" CSI driver and a reference to the SharedConfigMap in the volume attributes: +// +// spec: +// volumes: +// - name: shared-configmap +// csi: +// driver: csi.sharedresource.openshift.io +// volumeAttributes: +// sharedConfigMap: my-share +// +// For the mount to be successful, the pod's service account must be granted permission to 'use' the named SharedConfigMap object +// within its namespace with an appropriate Role and RoleBinding. For compactness, here are example `oc` invocations for creating +// such Role and RoleBinding objects. +// +// `oc create role shared-resource-my-share --verb=use --resource=sharedconfigmaps.sharedresource.openshift.io --resource-name=my-share` +// `oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default` +// +// Shared resource objects, in this case ConfigMaps, have default permissions of list, get, and watch for system authenticated users. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// These capabilities should not be used by applications needing long term support. +// +k8s:openapi-gen=true +// +openshift:compatibility-gen:level=4 +// +kubebuilder:subresource:status +type SharedConfigMap struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec is the specification of the desired shared configmap + // +kubebuilder:validation:Required + Spec SharedConfigMapSpec `json:"spec,omitempty"` + + // status is the observed status of the shared configmap + Status SharedConfigMapStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SharedConfigMapList contains a list of SharedConfigMap objects. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +type SharedConfigMapList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty"` + + Items []SharedConfigMap `json:"items"` +} + +// SharedConfigMapReference contains information about which ConfigMap to share +type SharedConfigMapReference struct { + // name represents the name of the ConfigMap that is being referenced. + // +kubebuilder:validation:Required + Name string `json:"name"` + // namespace represents the namespace where the referenced ConfigMap is located. 
+ // +kubebuilder:validation:Required + Namespace string `json:"namespace"` +} + +// SharedConfigMapSpec defines the desired state of a SharedConfigMap +// +k8s:openapi-gen=true +type SharedConfigMapSpec struct { + // configMapRef is a reference to the ConfigMap to share + // +kubebuilder:validation:Required + ConfigMapRef SharedConfigMapReference `json:"configMapRef"` + // description is a user readable explanation of what the backing resource provides. + Description string `json:"description,omitempty"` +} + +// SharedConfigMapStatus contains the observed status of the shared resource +type SharedConfigMapStatus struct { + // conditions represents any observations made on this particular shared resource by the underlying CSI driver or Share controller. + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` +} diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_secret.go b/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_secret.go new file mode 100644 index 000000000..6a6fb2246 --- /dev/null +++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/types_shared_secret.go @@ -0,0 +1,93 @@ +package v1alpha1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SharedSecret allows a Secret to be shared across namespaces. +// Pods can mount the shared Secret by adding a CSI volume to the pod specification using the +// "csi.sharedresource.openshift.io" CSI driver and a reference to the SharedSecret in the volume attributes: +// +// spec: +// volumes: +// - name: shared-secret +// csi: +// driver: csi.sharedresource.openshift.io +// volumeAttributes: +// sharedSecret: my-share +// +// For the mount to be successful, the pod's service account must be granted permission to 'use' the named SharedSecret object +// within its namespace with an appropriate Role and RoleBinding. For compactness, here are example `oc` invocations for creating +// such Role and RoleBinding objects. +// +// `oc create role shared-resource-my-share --verb=use --resource=sharedsecrets.sharedresource.openshift.io --resource-name=my-share` +// `oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default` +// +// Shared resource objects, in this case Secrets, have default permissions of list, get, and watch for system authenticated users. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +// +kubebuilder:subresource:status +// +type SharedSecret struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty"` + + // spec is the specification of the desired shared secret + // +kubebuilder:validation:Required + Spec SharedSecretSpec `json:"spec,omitempty"` + + // status is the observed status of the shared secret + Status SharedSecretStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SharedSecretList contains a list of SharedSecret objects. +// +// Compatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support. +// These capabilities should not be used by applications needing long term support. +// +openshift:compatibility-gen:level=4 +type SharedSecretList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty"` + + Items []SharedSecret `json:"items"` +} + +// SharedSecretReference contains information about which Secret to share +type SharedSecretReference struct { + // name represents the name of the Secret that is being referenced. + // +kubebuilder:validation:Required + Name string `json:"name"` + // namespace represents the namespace where the referenced Secret is located. + // +kubebuilder:validation:Required + Namespace string `json:"namespace"` +} + +// SharedSecretSpec defines the desired state of a SharedSecret +// +k8s:openapi-gen=true +type SharedSecretSpec struct { + // secretRef is a reference to the Secret to share + // +kubebuilder:validation:Required + SecretRef SharedSecretReference `json:"secretRef"` + // description is a user readable explanation of what the backing resource provides. + Description string `json:"description,omitempty"` +} + +// SharedSecretStatus contains the observed status of the shared resource +type SharedSecretStatus struct { + // conditions represents any observations made on this particular shared resource by the underlying CSI driver or Share controller. + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` +} diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..25ecd3836 --- /dev/null +++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,245 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharedConfigMap) DeepCopyInto(out *SharedConfigMap) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedConfigMap. 
+func (in *SharedConfigMap) DeepCopy() *SharedConfigMap { + if in == nil { + return nil + } + out := new(SharedConfigMap) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SharedConfigMap) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharedConfigMapList) DeepCopyInto(out *SharedConfigMapList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SharedConfigMap, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedConfigMapList. +func (in *SharedConfigMapList) DeepCopy() *SharedConfigMapList { + if in == nil { + return nil + } + out := new(SharedConfigMapList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SharedConfigMapList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharedConfigMapReference) DeepCopyInto(out *SharedConfigMapReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedConfigMapReference. +func (in *SharedConfigMapReference) DeepCopy() *SharedConfigMapReference { + if in == nil { + return nil + } + out := new(SharedConfigMapReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharedConfigMapSpec) DeepCopyInto(out *SharedConfigMapSpec) { + *out = *in + out.ConfigMapRef = in.ConfigMapRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedConfigMapSpec. +func (in *SharedConfigMapSpec) DeepCopy() *SharedConfigMapSpec { + if in == nil { + return nil + } + out := new(SharedConfigMapSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharedConfigMapStatus) DeepCopyInto(out *SharedConfigMapStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedConfigMapStatus. +func (in *SharedConfigMapStatus) DeepCopy() *SharedConfigMapStatus { + if in == nil { + return nil + } + out := new(SharedConfigMapStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SharedSecret) DeepCopyInto(out *SharedSecret) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedSecret. +func (in *SharedSecret) DeepCopy() *SharedSecret { + if in == nil { + return nil + } + out := new(SharedSecret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SharedSecret) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharedSecretList) DeepCopyInto(out *SharedSecretList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SharedSecret, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedSecretList. +func (in *SharedSecretList) DeepCopy() *SharedSecretList { + if in == nil { + return nil + } + out := new(SharedSecretList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SharedSecretList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharedSecretReference) DeepCopyInto(out *SharedSecretReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedSecretReference. +func (in *SharedSecretReference) DeepCopy() *SharedSecretReference { + if in == nil { + return nil + } + out := new(SharedSecretReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharedSecretSpec) DeepCopyInto(out *SharedSecretSpec) { + *out = *in + out.SecretRef = in.SecretRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedSecretSpec. +func (in *SharedSecretSpec) DeepCopy() *SharedSecretSpec { + if in == nil { + return nil + } + out := new(SharedSecretSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SharedSecretStatus) DeepCopyInto(out *SharedSecretStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SharedSecretStatus. 
+func (in *SharedSecretStatus) DeepCopy() *SharedSecretStatus {
+	if in == nil {
+		return nil
+	}
+	out := new(SharedSecretStatus)
+	in.DeepCopyInto(out)
+	return out
+}
diff --git a/vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.swagger_doc_generated.go
new file mode 100644
index 000000000..f432d63f7
--- /dev/null
+++ b/vendor/github.com/openshift/api/sharedresource/v1alpha1/zz_generated.swagger_doc_generated.go
@@ -0,0 +1,112 @@
+package v1alpha1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE
+var map_SharedConfigMap = map[string]string{
+	"":         "SharedConfigMap allows a ConfigMap to be shared across namespaces. Pods can mount the shared ConfigMap by adding a CSI volume to the pod specification using the \"csi.sharedresource.openshift.io\" CSI driver and a reference to the SharedConfigMap in the volume attributes:\n\nspec:\n volumes:\n - name: shared-configmap\n   csi:\n     driver: csi.sharedresource.openshift.io\n     volumeAttributes:\n       sharedConfigMap: my-share\n\nFor the mount to be successful, the pod's service account must be granted permission to 'use' the named SharedConfigMap object within its namespace with an appropriate Role and RoleBinding. For compactness, here are example `oc` invocations for creating such Role and RoleBinding objects.\n\n `oc create role shared-resource-my-share --verb=use --resource=sharedconfigmaps.sharedresource.openshift.io --resource-name=my-share`\n `oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default`\n\nShared resource objects, in this case ConfigMaps, have default permissions of list, get, and watch for system authenticated users.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec":     "spec is the specification of the desired shared configmap",
+	"status":   "status is the observed status of the shared configmap",
+}
+
+func (SharedConfigMap) SwaggerDoc() map[string]string {
+	return map_SharedConfigMap
+}
+
+var map_SharedConfigMapList = map[string]string{
+	"":         "SharedConfigMapList contains a list of SharedConfigMap objects.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (SharedConfigMapList) SwaggerDoc() map[string]string {
+	return map_SharedConfigMapList
+}
+
+var map_SharedConfigMapReference = map[string]string{
+	"":          "SharedConfigMapReference contains information about which ConfigMap to share",
+	"name":      "name represents the name of the ConfigMap that is being referenced.",
+	"namespace": "namespace represents the namespace where the referenced ConfigMap is located.",
+}
+
+func (SharedConfigMapReference) SwaggerDoc() map[string]string {
+	return map_SharedConfigMapReference
+}
+
+var map_SharedConfigMapSpec = map[string]string{
+	"":             "SharedConfigMapSpec defines the desired state of a SharedConfigMap",
+	"configMapRef": "configMapRef is a reference to the ConfigMap to share",
+	"description":  "description is a user readable explanation of what the backing resource provides.",
+}
+
+func (SharedConfigMapSpec) SwaggerDoc() map[string]string {
+	return map_SharedConfigMapSpec
+}
+
+var map_SharedConfigMapStatus = map[string]string{
+	"":           "SharedConfigMapStatus contains the observed status of the shared resource",
+	"conditions": "conditions represents any observations made on this particular shared resource by the underlying CSI driver or Share controller.",
+}
+
+func (SharedConfigMapStatus) SwaggerDoc() map[string]string {
+	return map_SharedConfigMapStatus
+}
+
+var map_SharedSecret = map[string]string{
+	"":         "SharedSecret allows a Secret to be shared across namespaces. Pods can mount the shared Secret by adding a CSI volume to the pod specification using the \"csi.sharedresource.openshift.io\" CSI driver and a reference to the SharedSecret in the volume attributes:\n\nspec:\n volumes:\n - name: shared-secret\n   csi:\n     driver: csi.sharedresource.openshift.io\n     volumeAttributes:\n       sharedSecret: my-share\n\nFor the mount to be successful, the pod's service account must be granted permission to 'use' the named SharedSecret object within its namespace with an appropriate Role and RoleBinding. For compactness, here are example `oc` invocations for creating such Role and RoleBinding objects.\n\n `oc create role shared-resource-my-share --verb=use --resource=sharedsecrets.sharedresource.openshift.io --resource-name=my-share`\n `oc create rolebinding shared-resource-my-share --role=shared-resource-my-share --serviceaccount=my-namespace:default`\n\nShared resource objects, in this case Secrets, have default permissions of list, get, and watch for system authenticated users.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+	"metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec":     "spec is the specification of the desired shared secret",
+	"status":   "status is the observed status of the shared secret",
+}
+
+func (SharedSecret) SwaggerDoc() map[string]string {
+	return map_SharedSecret
+}
+
+var map_SharedSecretList = map[string]string{
+	"":         "SharedSecretList contains a list of SharedSecret objects.\n\nCompatibility level 4: No compatibility is provided, the API can change at any point for any reason. These capabilities should not be used by applications needing long term support.",
+	"metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+}
+
+func (SharedSecretList) SwaggerDoc() map[string]string {
+	return map_SharedSecretList
+}
+
+var map_SharedSecretReference = map[string]string{
+	"":          "SharedSecretReference contains information about which Secret to share",
+	"name":      "name represents the name of the Secret that is being referenced.",
+	"namespace": "namespace represents the namespace where the referenced Secret is located.",
+}
+
+func (SharedSecretReference) SwaggerDoc() map[string]string {
+	return map_SharedSecretReference
+}
+
+var map_SharedSecretSpec = map[string]string{
+	"":            "SharedSecretSpec defines the desired state of a SharedSecret",
+	"secretRef":   "secretRef is a reference to the Secret to share",
+	"description": "description is a user readable explanation of what the backing resource provides.",
+}
+
+func (SharedSecretSpec) SwaggerDoc() map[string]string {
+	return map_SharedSecretSpec
+}
+
+var map_SharedSecretStatus = map[string]string{
+	"":           "SharedSecretStatus contains the observed status of the shared resource",
+	"conditions": "conditions represents any observations made on this particular shared resource by the underlying CSI driver or Share controller.",
+}
+
+func (SharedSecretStatus) SwaggerDoc() map[string]string {
+	return map_SharedSecretStatus
+}
+
+// AUTO-GENERATED FUNCTIONS END HERE
diff --git a/vendor/github.com/openshift/api/template/OWNERS b/vendor/github.com/openshift/api/template/OWNERS
new file mode 100644
index 000000000..c1ece8b21
--- /dev/null
+++ b/vendor/github.com/openshift/api/template/OWNERS
@@ -0,0 +1,4 @@
+reviewers:
+  - bparees
+  - gabemontero
+  - jim-minter
diff --git a/vendor/github.com/openshift/api/template/install.go b/vendor/github.com/openshift/api/template/install.go
new file mode 100644
index 000000000..8a69398dd
--- /dev/null
+++ b/vendor/github.com/openshift/api/template/install.go
@@ -0,0 +1,26 @@
+package template
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	templatev1 "github.com/openshift/api/template/v1"
+)
+
+const (
+	GroupName = "template.openshift.io"
+)
+
+var (
+	schemeBuilder = runtime.NewSchemeBuilder(templatev1.Install)
+	// Install is a function which adds every version of this group to a scheme
+	Install = schemeBuilder.AddToScheme
+)
+
+func Resource(resource string) schema.GroupResource {
+	return schema.GroupResource{Group: GroupName, Resource: resource}
+}
+
+func Kind(kind string) schema.GroupKind {
+	return schema.GroupKind{Group: GroupName, Kind: kind}
+}
diff --git a/vendor/github.com/openshift/api/template/v1/codec.go b/vendor/github.com/openshift/api/template/v1/codec.go
new file mode 100644
index 000000000..9e9177ed6
--- /dev/null
+++ b/vendor/github.com/openshift/api/template/v1/codec.go
@@ -0,0 +1,33 @@
+package v1
+
+import (
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+
+	"github.com/openshift/api/pkg/serialization"
+)
+
+var _ runtime.NestedObjectDecoder = &Template{}
+var _ runtime.NestedObjectEncoder = &Template{}
+
+// DecodeNestedObjects decodes the object as a runtime.Unknown with JSON content.
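+// Entries of Template.Objects whose Object field is still nil are wrapped so
+// the raw JSON survives a round trip even when the nested type is not
+// registered in the decoding scheme; the loop below is roughly equivalent to
+// (obj being an illustrative *Template, not a name used in this file):
+//
+//	obj.Objects[i].Object = &runtime.Unknown{
+//		ContentType: "application/json",
+//		Raw:         obj.Objects[i].Raw,
+//	}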
+func (c *Template) DecodeNestedObjects(d runtime.Decoder) error { + for i := range c.Objects { + if c.Objects[i].Object != nil { + continue + } + c.Objects[i].Object = &runtime.Unknown{ + ContentType: "application/json", + Raw: c.Objects[i].Raw, + } + } + return nil +} +func (c *Template) EncodeNestedObjects(e runtime.Encoder) error { + for i := range c.Objects { + if err := serialization.EncodeNestedRawExtension(unstructured.UnstructuredJSONScheme, &c.Objects[i]); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/openshift/api/template/v1/consts.go b/vendor/github.com/openshift/api/template/v1/consts.go new file mode 100644 index 000000000..cc8b49d55 --- /dev/null +++ b/vendor/github.com/openshift/api/template/v1/consts.go @@ -0,0 +1,16 @@ +package v1 + +const ( + // TemplateInstanceFinalizer is used to clean up the objects created by the template instance, + // when the template instance is deleted. + TemplateInstanceFinalizer = "template.openshift.io/finalizer" + + // TemplateInstanceOwner is a label applied to all objects created from a template instance + // which contains the uid of the template instance. + TemplateInstanceOwner = "template.openshift.io/template-instance-owner" + + // WaitForReadyAnnotation indicates that the TemplateInstance controller + // should wait for the object to be ready before reporting the template + // instantiation complete. + WaitForReadyAnnotation = "template.alpha.openshift.io/wait-for-ready" +) diff --git a/vendor/github.com/openshift/api/template/v1/doc.go b/vendor/github.com/openshift/api/template/v1/doc.go new file mode 100644 index 000000000..34f9f8d45 --- /dev/null +++ b/vendor/github.com/openshift/api/template/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/template/apis/template +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=template.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/template/v1/generated.pb.go b/vendor/github.com/openshift/api/template/v1/generated.pb.go new file mode 100644 index 000000000..df724d89d --- /dev/null +++ b/vendor/github.com/openshift/api/template/v1/generated.pb.go @@ -0,0 +1,4115 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/template/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
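+//
+// The declarations that follow are standard gogo/protobuf plumbing: each API
+// type gets Reset/ProtoMessage/Descriptor plus XXX_* hooks that delegate to
+// the sized-buffer marshalling code further down in this file. A wire-format
+// round trip looks roughly like this (names illustrative, not from this file):
+//
+//	data, err := proto.Marshal(&tmpl) // tmpl is a v1.Template
+//	if err == nil {
+//		var decoded Template
+//		err = proto.Unmarshal(data, &decoded)
+//	}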
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *BrokerTemplateInstance) Reset() { *m = BrokerTemplateInstance{} } +func (*BrokerTemplateInstance) ProtoMessage() {} +func (*BrokerTemplateInstance) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{0} +} +func (m *BrokerTemplateInstance) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BrokerTemplateInstance) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BrokerTemplateInstance) XXX_Merge(src proto.Message) { + xxx_messageInfo_BrokerTemplateInstance.Merge(m, src) +} +func (m *BrokerTemplateInstance) XXX_Size() int { + return m.Size() +} +func (m *BrokerTemplateInstance) XXX_DiscardUnknown() { + xxx_messageInfo_BrokerTemplateInstance.DiscardUnknown(m) +} + +var xxx_messageInfo_BrokerTemplateInstance proto.InternalMessageInfo + +func (m *BrokerTemplateInstanceList) Reset() { *m = BrokerTemplateInstanceList{} } +func (*BrokerTemplateInstanceList) ProtoMessage() {} +func (*BrokerTemplateInstanceList) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{1} +} +func (m *BrokerTemplateInstanceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BrokerTemplateInstanceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BrokerTemplateInstanceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_BrokerTemplateInstanceList.Merge(m, src) +} +func (m *BrokerTemplateInstanceList) XXX_Size() int { + return m.Size() +} +func (m *BrokerTemplateInstanceList) XXX_DiscardUnknown() { + xxx_messageInfo_BrokerTemplateInstanceList.DiscardUnknown(m) +} + +var xxx_messageInfo_BrokerTemplateInstanceList proto.InternalMessageInfo + +func (m *BrokerTemplateInstanceSpec) Reset() { *m = BrokerTemplateInstanceSpec{} } +func (*BrokerTemplateInstanceSpec) ProtoMessage() {} +func (*BrokerTemplateInstanceSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{2} +} +func (m *BrokerTemplateInstanceSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BrokerTemplateInstanceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BrokerTemplateInstanceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_BrokerTemplateInstanceSpec.Merge(m, src) +} +func (m *BrokerTemplateInstanceSpec) XXX_Size() int { + return m.Size() +} +func (m *BrokerTemplateInstanceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_BrokerTemplateInstanceSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_BrokerTemplateInstanceSpec proto.InternalMessageInfo + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{3} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, 
src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo + +func (m *Parameter) Reset() { *m = Parameter{} } +func (*Parameter) ProtoMessage() {} +func (*Parameter) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{4} +} +func (m *Parameter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Parameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Parameter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Parameter.Merge(m, src) +} +func (m *Parameter) XXX_Size() int { + return m.Size() +} +func (m *Parameter) XXX_DiscardUnknown() { + xxx_messageInfo_Parameter.DiscardUnknown(m) +} + +var xxx_messageInfo_Parameter proto.InternalMessageInfo + +func (m *Template) Reset() { *m = Template{} } +func (*Template) ProtoMessage() {} +func (*Template) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{5} +} +func (m *Template) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Template) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Template) XXX_Merge(src proto.Message) { + xxx_messageInfo_Template.Merge(m, src) +} +func (m *Template) XXX_Size() int { + return m.Size() +} +func (m *Template) XXX_DiscardUnknown() { + xxx_messageInfo_Template.DiscardUnknown(m) +} + +var xxx_messageInfo_Template proto.InternalMessageInfo + +func (m *TemplateInstance) Reset() { *m = TemplateInstance{} } +func (*TemplateInstance) ProtoMessage() {} +func (*TemplateInstance) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{6} +} +func (m *TemplateInstance) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TemplateInstance) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TemplateInstance) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemplateInstance.Merge(m, src) +} +func (m *TemplateInstance) XXX_Size() int { + return m.Size() +} +func (m *TemplateInstance) XXX_DiscardUnknown() { + xxx_messageInfo_TemplateInstance.DiscardUnknown(m) +} + +var xxx_messageInfo_TemplateInstance proto.InternalMessageInfo + +func (m *TemplateInstanceCondition) Reset() { *m = TemplateInstanceCondition{} } +func (*TemplateInstanceCondition) ProtoMessage() {} +func (*TemplateInstanceCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{7} +} +func (m *TemplateInstanceCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TemplateInstanceCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TemplateInstanceCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemplateInstanceCondition.Merge(m, src) +} +func (m *TemplateInstanceCondition) XXX_Size() int { + return m.Size() +} +func (m *TemplateInstanceCondition) XXX_DiscardUnknown() { + xxx_messageInfo_TemplateInstanceCondition.DiscardUnknown(m) +} + +var 
xxx_messageInfo_TemplateInstanceCondition proto.InternalMessageInfo + +func (m *TemplateInstanceList) Reset() { *m = TemplateInstanceList{} } +func (*TemplateInstanceList) ProtoMessage() {} +func (*TemplateInstanceList) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{8} +} +func (m *TemplateInstanceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TemplateInstanceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TemplateInstanceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemplateInstanceList.Merge(m, src) +} +func (m *TemplateInstanceList) XXX_Size() int { + return m.Size() +} +func (m *TemplateInstanceList) XXX_DiscardUnknown() { + xxx_messageInfo_TemplateInstanceList.DiscardUnknown(m) +} + +var xxx_messageInfo_TemplateInstanceList proto.InternalMessageInfo + +func (m *TemplateInstanceObject) Reset() { *m = TemplateInstanceObject{} } +func (*TemplateInstanceObject) ProtoMessage() {} +func (*TemplateInstanceObject) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{9} +} +func (m *TemplateInstanceObject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TemplateInstanceObject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TemplateInstanceObject) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemplateInstanceObject.Merge(m, src) +} +func (m *TemplateInstanceObject) XXX_Size() int { + return m.Size() +} +func (m *TemplateInstanceObject) XXX_DiscardUnknown() { + xxx_messageInfo_TemplateInstanceObject.DiscardUnknown(m) +} + +var xxx_messageInfo_TemplateInstanceObject proto.InternalMessageInfo + +func (m *TemplateInstanceRequester) Reset() { *m = TemplateInstanceRequester{} } +func (*TemplateInstanceRequester) ProtoMessage() {} +func (*TemplateInstanceRequester) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{10} +} +func (m *TemplateInstanceRequester) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TemplateInstanceRequester) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TemplateInstanceRequester) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemplateInstanceRequester.Merge(m, src) +} +func (m *TemplateInstanceRequester) XXX_Size() int { + return m.Size() +} +func (m *TemplateInstanceRequester) XXX_DiscardUnknown() { + xxx_messageInfo_TemplateInstanceRequester.DiscardUnknown(m) +} + +var xxx_messageInfo_TemplateInstanceRequester proto.InternalMessageInfo + +func (m *TemplateInstanceSpec) Reset() { *m = TemplateInstanceSpec{} } +func (*TemplateInstanceSpec) ProtoMessage() {} +func (*TemplateInstanceSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{11} +} +func (m *TemplateInstanceSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TemplateInstanceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TemplateInstanceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemplateInstanceSpec.Merge(m, src) +} +func 
(m *TemplateInstanceSpec) XXX_Size() int { + return m.Size() +} +func (m *TemplateInstanceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_TemplateInstanceSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_TemplateInstanceSpec proto.InternalMessageInfo + +func (m *TemplateInstanceStatus) Reset() { *m = TemplateInstanceStatus{} } +func (*TemplateInstanceStatus) ProtoMessage() {} +func (*TemplateInstanceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{12} +} +func (m *TemplateInstanceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TemplateInstanceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TemplateInstanceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemplateInstanceStatus.Merge(m, src) +} +func (m *TemplateInstanceStatus) XXX_Size() int { + return m.Size() +} +func (m *TemplateInstanceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_TemplateInstanceStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_TemplateInstanceStatus proto.InternalMessageInfo + +func (m *TemplateList) Reset() { *m = TemplateList{} } +func (*TemplateList) ProtoMessage() {} +func (*TemplateList) Descriptor() ([]byte, []int) { + return fileDescriptor_8d3ee9f55fa8363e, []int{13} +} +func (m *TemplateList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TemplateList) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemplateList.Merge(m, src) +} +func (m *TemplateList) XXX_Size() int { + return m.Size() +} +func (m *TemplateList) XXX_DiscardUnknown() { + xxx_messageInfo_TemplateList.DiscardUnknown(m) +} + +var xxx_messageInfo_TemplateList proto.InternalMessageInfo + +func init() { + proto.RegisterType((*BrokerTemplateInstance)(nil), "github.com.openshift.api.template.v1.BrokerTemplateInstance") + proto.RegisterType((*BrokerTemplateInstanceList)(nil), "github.com.openshift.api.template.v1.BrokerTemplateInstanceList") + proto.RegisterType((*BrokerTemplateInstanceSpec)(nil), "github.com.openshift.api.template.v1.BrokerTemplateInstanceSpec") + proto.RegisterType((*ExtraValue)(nil), "github.com.openshift.api.template.v1.ExtraValue") + proto.RegisterType((*Parameter)(nil), "github.com.openshift.api.template.v1.Parameter") + proto.RegisterType((*Template)(nil), "github.com.openshift.api.template.v1.Template") + proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.template.v1.Template.LabelsEntry") + proto.RegisterType((*TemplateInstance)(nil), "github.com.openshift.api.template.v1.TemplateInstance") + proto.RegisterType((*TemplateInstanceCondition)(nil), "github.com.openshift.api.template.v1.TemplateInstanceCondition") + proto.RegisterType((*TemplateInstanceList)(nil), "github.com.openshift.api.template.v1.TemplateInstanceList") + proto.RegisterType((*TemplateInstanceObject)(nil), "github.com.openshift.api.template.v1.TemplateInstanceObject") + proto.RegisterType((*TemplateInstanceRequester)(nil), "github.com.openshift.api.template.v1.TemplateInstanceRequester") + proto.RegisterMapType((map[string]ExtraValue)(nil), "github.com.openshift.api.template.v1.TemplateInstanceRequester.ExtraEntry") + proto.RegisterType((*TemplateInstanceSpec)(nil), 
"github.com.openshift.api.template.v1.TemplateInstanceSpec") + proto.RegisterType((*TemplateInstanceStatus)(nil), "github.com.openshift.api.template.v1.TemplateInstanceStatus") + proto.RegisterType((*TemplateList)(nil), "github.com.openshift.api.template.v1.TemplateList") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/template/v1/generated.proto", fileDescriptor_8d3ee9f55fa8363e) +} + +var fileDescriptor_8d3ee9f55fa8363e = []byte{ + // 1246 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x57, 0x4d, 0x6f, 0x5b, 0x45, + 0x17, 0xf6, 0xf5, 0x57, 0xec, 0x71, 0xdb, 0x37, 0x9a, 0xb7, 0xaa, 0x2e, 0x96, 0x6a, 0x5b, 0xb7, + 0x15, 0x0a, 0xa8, 0xb9, 0x26, 0x51, 0x28, 0x25, 0x42, 0x02, 0x2e, 0x49, 0xab, 0x94, 0x14, 0xd0, + 0x24, 0x45, 0x08, 0xb2, 0x60, 0x7c, 0x3d, 0x76, 0x6e, 0xe3, 0xfb, 0xc1, 0xcc, 0x38, 0xd4, 0xbb, + 0x2e, 0xf8, 0x01, 0x2c, 0x59, 0xf2, 0x13, 0x58, 0xb2, 0x42, 0x62, 0x97, 0x65, 0xd9, 0x75, 0x01, + 0x16, 0x31, 0x2b, 0xfe, 0x00, 0x48, 0x65, 0x83, 0x66, 0xee, 0xdc, 0x0f, 0x7f, 0x51, 0x27, 0x95, + 0xda, 0x9d, 0xef, 0x99, 0xf3, 0x3c, 0x67, 0xce, 0x99, 0x33, 0xcf, 0x1c, 0x83, 0x8d, 0xae, 0xc3, + 0x0f, 0xfb, 0x2d, 0xd3, 0xf6, 0xdd, 0xa6, 0x1f, 0x10, 0x8f, 0x1d, 0x3a, 0x1d, 0xde, 0xc4, 0x81, + 0xd3, 0xe4, 0xc4, 0x0d, 0x7a, 0x98, 0x93, 0xe6, 0xf1, 0x5a, 0xb3, 0x4b, 0x3c, 0x42, 0x31, 0x27, + 0x6d, 0x33, 0xa0, 0x3e, 0xf7, 0xe1, 0xf5, 0x04, 0x65, 0xc6, 0x28, 0x13, 0x07, 0x8e, 0x19, 0xa1, + 0xcc, 0xe3, 0xb5, 0xea, 0x6a, 0x8a, 0xbb, 0xeb, 0x77, 0xfd, 0xa6, 0x04, 0xb7, 0xfa, 0x1d, 0xf9, + 0x25, 0x3f, 0xe4, 0xaf, 0x90, 0xb4, 0x6a, 0x1c, 0xdd, 0x62, 0xa6, 0xe3, 0xcb, 0xe0, 0xb6, 0x4f, + 0x67, 0x05, 0xae, 0x6e, 0x24, 0x3e, 0x2e, 0xb6, 0x0f, 0x1d, 0x8f, 0xd0, 0x41, 0x33, 0x38, 0xea, + 0x0a, 0x03, 0x6b, 0xba, 0x84, 0xe3, 0x59, 0xa8, 0xe6, 0x3c, 0x14, 0xed, 0x7b, 0xdc, 0x71, 0xc9, + 0x14, 0xe0, 0xe6, 0xb3, 0x00, 0xcc, 0x3e, 0x24, 0x2e, 0x9e, 0xc4, 0x19, 0x43, 0x0d, 0x5c, 0xb1, + 0xa8, 0x7f, 0x44, 0xe8, 0xbe, 0xaa, 0xc3, 0x8e, 0xc7, 0x38, 0xf6, 0x6c, 0x02, 0xbf, 0x04, 0x25, + 0xb1, 0xbd, 0x36, 0xe6, 0x58, 0xd7, 0x1a, 0xda, 0x4a, 0x65, 0xfd, 0x0d, 0x33, 0x8c, 0x62, 0xa6, + 0xa3, 0x98, 0xc1, 0x51, 0x57, 0x18, 0x98, 0x29, 0xbc, 0xcd, 0xe3, 0x35, 0xf3, 0xe3, 0xd6, 0x03, + 0x62, 0xf3, 0x7b, 0x84, 0x63, 0x0b, 0x9e, 0x0c, 0xeb, 0x99, 0xd1, 0xb0, 0x0e, 0x12, 0x1b, 0x8a, + 0x59, 0x61, 0x0b, 0xe4, 0x59, 0x40, 0x6c, 0x3d, 0x2b, 0xd9, 0xdf, 0x33, 0x17, 0x39, 0x23, 0x73, + 0xf6, 0x6e, 0xf7, 0x02, 0x62, 0x5b, 0x17, 0x54, 0xb4, 0xbc, 0xf8, 0x42, 0x92, 0xdb, 0xf8, 0x4d, + 0x03, 0xd5, 0xd9, 0x90, 0x5d, 0x87, 0x71, 0x78, 0x30, 0x95, 0xa4, 0xb9, 0x58, 0x92, 0x02, 0x2d, + 0x53, 0x5c, 0x56, 0x41, 0x4b, 0x91, 0x25, 0x95, 0x20, 0x06, 0x05, 0x87, 0x13, 0x97, 0xe9, 0xd9, + 0x46, 0x6e, 0xa5, 0xb2, 0xfe, 0xce, 0xf3, 0x64, 0x68, 0x5d, 0x54, 0x81, 0x0a, 0x3b, 0x82, 0x12, + 0x85, 0xcc, 0xc6, 0x37, 0xd9, 0x79, 0xf9, 0x89, 0x22, 0x40, 0x07, 0x2c, 0xf3, 0x09, 0xbb, 0xca, + 0xf3, 0x5a, 0x2a, 0x4f, 0x53, 0x74, 0x6f, 0x72, 0x74, 0x88, 0x74, 0x08, 0x25, 0x22, 0xa6, 0xae, + 0x62, 0x2e, 0x4f, 0x92, 0xa3, 0x29, 0x5a, 0xf8, 0x21, 0x28, 0x32, 0x62, 0x53, 0xc2, 0xd5, 0x79, + 0x2e, 0x14, 0xe0, 0x92, 0x0a, 0x50, 0xdc, 0x93, 0x50, 0xa4, 0x28, 0xa0, 0x09, 0x40, 0xcb, 0xf1, + 0xda, 0x8e, 0xd7, 0xdd, 0xd9, 0x62, 0x7a, 0xae, 0x91, 0x5b, 0x29, 0x5b, 0x97, 0x44, 0x23, 0x59, + 0xb1, 0x15, 0xa5, 0x3c, 0x8c, 0xb7, 0x00, 0xd8, 0x7e, 0xc8, 0x29, 0xfe, 0x14, 0xf7, 0xfa, 0x04, + 0xd6, 0xa3, 0xba, 0x6b, 0x12, 0x58, 0x9e, 0xac, 0xda, 0x66, 0xe9, 0xbb, 0xef, 0xeb, 0x99, 0x47, + 0xbf, 
0x36, 0x32, 0xc6, 0x4f, 0x59, 0x50, 0xfe, 0x04, 0x53, 0xec, 0x12, 0x4e, 0x28, 0x6c, 0x80, + 0xbc, 0x87, 0xdd, 0xb0, 0x44, 0xe5, 0xa4, 0x9f, 0x3e, 0xc2, 0x2e, 0x41, 0x72, 0x05, 0xbe, 0x09, + 0x2a, 0x6d, 0x87, 0x05, 0x3d, 0x3c, 0x10, 0x46, 0x99, 0x6a, 0xd9, 0xfa, 0xbf, 0x72, 0xac, 0x6c, + 0x25, 0x4b, 0x28, 0xed, 0x27, 0x61, 0x84, 0xd9, 0xd4, 0x09, 0xb8, 0xe3, 0x7b, 0x7a, 0x6e, 0x02, + 0x96, 0x2c, 0xa1, 0xb4, 0x1f, 0xbc, 0x06, 0x0a, 0xc7, 0x22, 0x23, 0x3d, 0x2f, 0x01, 0x71, 0x0b, + 0xc8, 0x34, 0x51, 0xb8, 0x06, 0x6f, 0x80, 0x52, 0x74, 0xad, 0xf5, 0x82, 0xf4, 0x8b, 0x7b, 0xf2, + 0x8e, 0xb2, 0xa3, 0xd8, 0x43, 0xa4, 0xd8, 0xa1, 0xbe, 0xab, 0x17, 0xc7, 0x53, 0xbc, 0x4d, 0x7d, + 0x17, 0xc9, 0x15, 0xc1, 0x47, 0xc9, 0x57, 0x7d, 0x87, 0x92, 0xb6, 0xbe, 0xd4, 0xd0, 0x56, 0x4a, + 0x09, 0x1f, 0x52, 0x76, 0x14, 0x7b, 0x18, 0xff, 0xe4, 0x40, 0x29, 0xea, 0x8e, 0x17, 0xa0, 0x19, + 0xaf, 0x81, 0x25, 0x97, 0x30, 0x86, 0xbb, 0x51, 0xed, 0xff, 0xa7, 0xdc, 0x97, 0xee, 0x85, 0x66, + 0x14, 0xad, 0xc3, 0xcf, 0xc0, 0x92, 0x2f, 0x29, 0xc2, 0x06, 0xaa, 0xac, 0xaf, 0xce, 0xdd, 0x8b, + 0x52, 0x49, 0x13, 0xe1, 0xaf, 0xb7, 0x1f, 0x72, 0xe2, 0x31, 0xc7, 0xf7, 0x12, 0xe6, 0x70, 0x23, + 0x0c, 0x45, 0x74, 0xd0, 0x06, 0x20, 0x88, 0x7a, 0x86, 0xe9, 0x79, 0x49, 0xde, 0x5c, 0xec, 0x72, + 0xc7, 0xbd, 0x96, 0xe4, 0x19, 0x9b, 0x18, 0x4a, 0xd1, 0xc2, 0x43, 0x50, 0xec, 0xe1, 0x16, 0xe9, + 0x31, 0xbd, 0x20, 0x03, 0x6c, 0x2e, 0x16, 0x20, 0x3a, 0x0b, 0x73, 0x57, 0x82, 0xb7, 0x3d, 0x4e, + 0x07, 0xd6, 0x65, 0x15, 0xeb, 0x42, 0x98, 0x4a, 0xb8, 0x84, 0x14, 0x7f, 0xf5, 0x6d, 0x50, 0x49, + 0x39, 0xc3, 0x65, 0x90, 0x3b, 0x22, 0x83, 0xf0, 0x0e, 0x20, 0xf1, 0x13, 0x5e, 0x8e, 0xda, 0x50, + 0x96, 0x5c, 0xf5, 0xdd, 0x66, 0xf6, 0x96, 0x66, 0xfc, 0x98, 0x05, 0xcb, 0x2f, 0xe1, 0xe5, 0x38, + 0x18, 0x7b, 0x39, 0xce, 0x58, 0x99, 0x67, 0xbd, 0x19, 0xb0, 0x0d, 0x8a, 0x8c, 0x63, 0xde, 0x67, + 0xf2, 0x9e, 0x2e, 0xac, 0xdb, 0x53, 0xfc, 0x92, 0x23, 0x25, 0x71, 0xf2, 0x1b, 0x29, 0x6e, 0xe3, + 0xef, 0x2c, 0x78, 0x65, 0x12, 0xf2, 0x81, 0xef, 0xb5, 0x1d, 0x79, 0xf3, 0xdf, 0x07, 0x79, 0x3e, + 0x08, 0x22, 0x25, 0x5a, 0x8d, 0x76, 0xb9, 0x3f, 0x08, 0xc8, 0xd3, 0x61, 0xfd, 0xea, 0x5c, 0xa0, + 0x70, 0x40, 0x12, 0x0a, 0x77, 0xe3, 0x34, 0xc2, 0x9b, 0xb2, 0x31, 0xbe, 0x91, 0xa7, 0xc3, 0xfa, + 0x8c, 0x01, 0xc6, 0x8c, 0x99, 0xc6, 0xb7, 0x0b, 0x8f, 0x01, 0xec, 0x61, 0xc6, 0xf7, 0x29, 0xf6, + 0x58, 0x18, 0xc9, 0x71, 0x89, 0x2a, 0xd0, 0xeb, 0x8b, 0x1d, 0xaf, 0x40, 0x58, 0x55, 0xb5, 0x0b, + 0xb8, 0x3b, 0xc5, 0x86, 0x66, 0x44, 0x80, 0xaf, 0x82, 0x22, 0x25, 0x98, 0xf9, 0x9e, 0xd2, 0xc0, + 0xb8, 0x9c, 0x48, 0x5a, 0x91, 0x5a, 0x4d, 0x0b, 0x43, 0xe1, 0xbf, 0x85, 0xc1, 0xf8, 0x45, 0x03, + 0x97, 0x5f, 0xc2, 0x34, 0xf0, 0xc5, 0xf8, 0x34, 0x70, 0xf3, 0x7c, 0x5d, 0x35, 0x67, 0x0e, 0x38, + 0x00, 0x57, 0x26, 0x3d, 0xc3, 0x9b, 0x03, 0x2d, 0x90, 0xa3, 0xa4, 0x73, 0x96, 0x57, 0xbf, 0xa2, + 0x22, 0xe4, 0x10, 0xe9, 0x20, 0x01, 0x36, 0xfe, 0x9c, 0xd1, 0xab, 0xe2, 0x2d, 0x20, 0x4c, 0xbc, + 0x9a, 0x37, 0x40, 0xa9, 0xcf, 0x08, 0x4d, 0xbd, 0x9c, 0x71, 0x19, 0xee, 0x2b, 0x3b, 0x8a, 0x3d, + 0xe0, 0x55, 0x90, 0xeb, 0x3b, 0x6d, 0xd5, 0x93, 0x71, 0xa8, 0xfb, 0x3b, 0x5b, 0x48, 0xd8, 0xa1, + 0x01, 0x8a, 0x5d, 0xea, 0xf7, 0x83, 0xe8, 0xd5, 0x07, 0xe2, 0xac, 0xef, 0x48, 0x0b, 0x52, 0x2b, + 0xd0, 0x07, 0x05, 0x22, 0x5e, 0x7b, 0x25, 0xbd, 0x77, 0xcf, 0x57, 0xc9, 0x38, 0x01, 0x53, 0x8e, + 0x0e, 0xa1, 0x52, 0xc6, 0xd5, 0x95, 0x36, 0x14, 0xc6, 0xa9, 0x3e, 0x50, 0xe3, 0xc5, 0x3c, 0x81, + 0xbc, 0x9d, 0x16, 0x48, 0x21, 0x77, 0x0b, 0x6d, 0x28, 0x99, 0x58, 0xd2, 0x92, 0xfa, 0x43, 0x76, + 0xba, 0x3b, 0xe5, 0x2c, 0x77, 
0x00, 0x4a, 0x11, 0x3a, 0xee, 0xce, 0x33, 0x25, 0x9e, 0x1c, 0x4b, + 0x64, 0x41, 0x31, 0xa3, 0x54, 0x8b, 0xf4, 0xf8, 0xb6, 0x32, 0xab, 0x53, 0x76, 0x7d, 0x1b, 0xf7, + 0x26, 0xdb, 0x05, 0xcc, 0x98, 0xdf, 0x7a, 0xa0, 0x4c, 0xa3, 0xf2, 0x2a, 0x91, 0x78, 0xf7, 0x39, + 0x4f, 0xc9, 0xba, 0x38, 0x1a, 0xd6, 0xcb, 0xf1, 0x27, 0x4a, 0x02, 0x18, 0x7f, 0x69, 0xd3, 0xdd, + 0x1f, 0xca, 0x17, 0x64, 0x00, 0xd8, 0x91, 0xa2, 0x85, 0xf3, 0xe0, 0xb9, 0x77, 0x12, 0x2b, 0x63, + 0xf2, 0x38, 0xc5, 0x26, 0x86, 0x52, 0x61, 0x60, 0x37, 0x99, 0x3c, 0xce, 0x34, 0xf9, 0xcf, 0xbe, + 0xc1, 0xf3, 0x07, 0x11, 0xe3, 0x67, 0x0d, 0x5c, 0x88, 0x40, 0x2f, 0x40, 0xc1, 0xf6, 0xc6, 0x15, + 0xec, 0xac, 0xed, 0x37, 0x53, 0xb9, 0xac, 0xbb, 0x27, 0xa7, 0xb5, 0xcc, 0xe3, 0xd3, 0x5a, 0xe6, + 0xc9, 0x69, 0x2d, 0xf3, 0x68, 0x54, 0xd3, 0x4e, 0x46, 0x35, 0xed, 0xf1, 0xa8, 0xa6, 0x3d, 0x19, + 0xd5, 0xb4, 0xdf, 0x47, 0x35, 0xed, 0xdb, 0x3f, 0x6a, 0x99, 0xcf, 0xaf, 0x2f, 0xf2, 0xb7, 0xff, + 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd0, 0x61, 0xc4, 0xab, 0x1d, 0x10, 0x00, 0x00, +} + +func (m *BrokerTemplateInstance) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BrokerTemplateInstance) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BrokerTemplateInstance) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BrokerTemplateInstanceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BrokerTemplateInstanceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BrokerTemplateInstanceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BrokerTemplateInstanceSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BrokerTemplateInstanceSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BrokerTemplateInstanceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.BindingIDs) > 0 { + for iNdEx := len(m.BindingIDs) - 1; iNdEx 
>= 0; iNdEx-- { + i -= len(m.BindingIDs[iNdEx]) + copy(dAtA[i:], m.BindingIDs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BindingIDs[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.TemplateInstance.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m ExtraValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m) > 0 { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Parameter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Parameter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Parameter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.Required { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + i -= len(m.From) + copy(dAtA[i:], m.From) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.From))) + i-- + dAtA[i] = 0x32 + i -= len(m.Generate) + copy(dAtA[i:], m.Generate) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Generate))) + i-- + dAtA[i] = 0x2a + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x22 + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x1a + i -= len(m.DisplayName) + copy(dAtA[i:], m.DisplayName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DisplayName))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Template) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Template) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Template) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ObjectLabels) > 0 { + keysForObjectLabels := make([]string, 0, len(m.ObjectLabels)) + for k := range m.ObjectLabels { + keysForObjectLabels = append(keysForObjectLabels, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForObjectLabels) + for iNdEx := len(keysForObjectLabels) - 1; iNdEx >= 0; iNdEx-- { + v := m.ObjectLabels[string(keysForObjectLabels[iNdEx])] + baseI 
:= i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForObjectLabels[iNdEx]) + copy(dAtA[i:], keysForObjectLabels[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForObjectLabels[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Parameters) > 0 { + for iNdEx := len(m.Parameters) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Parameters[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Objects) > 0 { + for iNdEx := len(m.Objects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Objects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TemplateInstance) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TemplateInstance) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TemplateInstance) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TemplateInstanceCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TemplateInstanceCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TemplateInstanceCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + 
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TemplateInstanceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TemplateInstanceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TemplateInstanceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TemplateInstanceObject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TemplateInstanceObject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TemplateInstanceObject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Ref.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TemplateInstanceRequester) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TemplateInstanceRequester) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TemplateInstanceRequester) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Extra) > 0 { + keysForExtra := make([]string, 0, len(m.Extra)) + for k := range m.Extra { + keysForExtra = append(keysForExtra, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x12 + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Username))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TemplateInstanceSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TemplateInstanceSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TemplateInstanceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Requester != nil { + { + size, err := m.Requester.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Secret != nil { + { + size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TemplateInstanceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TemplateInstanceStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TemplateInstanceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Objects) > 0 { + for iNdEx := len(m.Objects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Objects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *TemplateList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TemplateList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return 
base +} +func (m *BrokerTemplateInstance) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *BrokerTemplateInstanceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *BrokerTemplateInstanceSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.TemplateInstance.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.BindingIDs) > 0 { + for _, s := range m.BindingIDs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m ExtraValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Parameter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DisplayName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Generate) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.From) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *Template) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Objects) > 0 { + for _, e := range m.Objects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Parameters) > 0 { + for _, e := range m.Parameters { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ObjectLabels) > 0 { + for k, v := range m.ObjectLabels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *TemplateInstance) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TemplateInstanceCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TemplateInstanceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TemplateInstanceObject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Ref.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m 
*TemplateInstanceRequester) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Username) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Extra) > 0 { + for k, v := range m.Extra { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *TemplateInstanceSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Requester != nil { + l = m.Requester.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *TemplateInstanceStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Objects) > 0 { + for _, e := range m.Objects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TemplateList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *BrokerTemplateInstance) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BrokerTemplateInstance{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "BrokerTemplateInstanceSpec", "BrokerTemplateInstanceSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *BrokerTemplateInstanceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]BrokerTemplateInstance{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "BrokerTemplateInstance", "BrokerTemplateInstance", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&BrokerTemplateInstanceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *BrokerTemplateInstanceSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BrokerTemplateInstanceSpec{`, + `TemplateInstance:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TemplateInstance), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `Secret:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Secret), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `BindingIDs:` + fmt.Sprintf("%v", this.BindingIDs) + `,`, + `}`, + }, "") + return s +} +func (this *Parameter) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&Parameter{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DisplayName:` + fmt.Sprintf("%v", this.DisplayName) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Generate:` + fmt.Sprintf("%v", this.Generate) + `,`, + `From:` + fmt.Sprintf("%v", this.From) + `,`, + `Required:` + fmt.Sprintf("%v", this.Required) + `,`, + `}`, + }, "") + return s +} +func (this *Template) String() string { + if this == nil { + return "nil" + } + repeatedStringForObjects := "[]RawExtension{" + for _, f := range this.Objects { + repeatedStringForObjects += fmt.Sprintf("%v", f) + "," + } + repeatedStringForObjects += "}" + repeatedStringForParameters := "[]Parameter{" + for _, f := range this.Parameters { + repeatedStringForParameters += strings.Replace(strings.Replace(f.String(), "Parameter", "Parameter", 1), `&`, ``, 1) + "," + } + repeatedStringForParameters += "}" + keysForObjectLabels := make([]string, 0, len(this.ObjectLabels)) + for k := range this.ObjectLabels { + keysForObjectLabels = append(keysForObjectLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForObjectLabels) + mapStringForObjectLabels := "map[string]string{" + for _, k := range keysForObjectLabels { + mapStringForObjectLabels += fmt.Sprintf("%v: %v,", k, this.ObjectLabels[k]) + } + mapStringForObjectLabels += "}" + s := strings.Join([]string{`&Template{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Objects:` + repeatedStringForObjects + `,`, + `Parameters:` + repeatedStringForParameters + `,`, + `ObjectLabels:` + mapStringForObjectLabels + `,`, + `}`, + }, "") + return s +} +func (this *TemplateInstance) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TemplateInstance{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TemplateInstanceSpec", "TemplateInstanceSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TemplateInstanceStatus", "TemplateInstanceStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *TemplateInstanceCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TemplateInstanceCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *TemplateInstanceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]TemplateInstance{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "TemplateInstance", "TemplateInstance", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&TemplateInstanceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this 
*TemplateInstanceObject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TemplateInstanceObject{`, + `Ref:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ref), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *TemplateInstanceRequester) String() string { + if this == nil { + return "nil" + } + keysForExtra := make([]string, 0, len(this.Extra)) + for k := range this.Extra { + keysForExtra = append(keysForExtra, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + mapStringForExtra := "map[string]ExtraValue{" + for _, k := range keysForExtra { + mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) + } + mapStringForExtra += "}" + s := strings.Join([]string{`&TemplateInstanceRequester{`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Extra:` + mapStringForExtra + `,`, + `}`, + }, "") + return s +} +func (this *TemplateInstanceSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TemplateInstanceSpec{`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "Template", "Template", 1), `&`, ``, 1) + `,`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "LocalObjectReference", "v11.LocalObjectReference", 1) + `,`, + `Requester:` + strings.Replace(this.Requester.String(), "TemplateInstanceRequester", "TemplateInstanceRequester", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TemplateInstanceStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]TemplateInstanceCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "TemplateInstanceCondition", "TemplateInstanceCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + repeatedStringForObjects := "[]TemplateInstanceObject{" + for _, f := range this.Objects { + repeatedStringForObjects += strings.Replace(strings.Replace(f.String(), "TemplateInstanceObject", "TemplateInstanceObject", 1), `&`, ``, 1) + "," + } + repeatedStringForObjects += "}" + s := strings.Join([]string{`&TemplateInstanceStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `Objects:` + repeatedStringForObjects + `,`, + `}`, + }, "") + return s +} +func (this *TemplateList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Template{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Template", "Template", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&TemplateList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *BrokerTemplateInstance) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BrokerTemplateInstance: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BrokerTemplateInstance: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BrokerTemplateInstanceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BrokerTemplateInstanceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BrokerTemplateInstanceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, BrokerTemplateInstance{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BrokerTemplateInstanceSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BrokerTemplateInstanceSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BrokerTemplateInstanceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TemplateInstance", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TemplateInstance.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BindingIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BindingIDs = append(m.BindingIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExtraValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Parameter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Parameter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Parameter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DisplayName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DisplayName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Generate", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Generate = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.From = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Required", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Required = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Template) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Template: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Template: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Objects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Objects = append(m.Objects, runtime.RawExtension{}) + if err := m.Objects[len(m.Objects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + 
return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Parameters = append(m.Parameters, Parameter{}) + if err := m.Parameters[len(m.Parameters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectLabels == nil { + m.ObjectLabels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + 
m.ObjectLabels[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TemplateInstance) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TemplateInstance: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TemplateInstance: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TemplateInstanceCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + 
iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TemplateInstanceCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TemplateInstanceCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = TemplateInstanceConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TemplateInstanceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TemplateInstanceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TemplateInstanceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, TemplateInstance{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TemplateInstanceObject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TemplateInstanceObject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TemplateInstanceObject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Ref.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TemplateInstanceRequester) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TemplateInstanceRequester: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TemplateInstanceRequester: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Extra == nil { + m.Extra = make(map[string]ExtraValue) + } + var mapkey string + mapvalue := &ExtraValue{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Extra[mapkey] = *mapvalue + iNdEx = postIndex 
+ default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TemplateInstanceSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TemplateInstanceSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TemplateInstanceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &v11.LocalObjectReference{} + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requester", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Requester == nil { + m.Requester = &TemplateInstanceRequester{} + } + if err := m.Requester.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF 
+ } + return nil +} +func (m *TemplateInstanceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TemplateInstanceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TemplateInstanceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, TemplateInstanceCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Objects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Objects = append(m.Objects, TemplateInstanceObject{}) + if err := m.Objects[len(m.Objects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TemplateList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TemplateList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TemplateList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Template{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/openshift/api/template/v1/generated.proto b/vendor/github.com/openshift/api/template/v1/generated.proto new file mode 100644 index 000000000..24b37bcd7 --- /dev/null +++ b/vendor/github.com/openshift/api/template/v1/generated.proto @@ -0,0 +1,262 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
+ +syntax = "proto2"; + +package github.com.openshift.api.template.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/openshift/api/template/v1"; + +// BrokerTemplateInstance holds the service broker-related state associated with +// a TemplateInstance. BrokerTemplateInstance is part of an experimental API. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message BrokerTemplateInstance { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec describes the state of this BrokerTemplateInstance. + optional BrokerTemplateInstanceSpec spec = 2; +} + +// BrokerTemplateInstanceList is a list of BrokerTemplateInstance objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message BrokerTemplateInstanceList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of BrokerTemplateInstances + repeated BrokerTemplateInstance items = 2; +} + +// BrokerTemplateInstanceSpec describes the state of a BrokerTemplateInstance. +message BrokerTemplateInstanceSpec { + // templateinstance is a reference to a TemplateInstance object residing + // in a namespace. + optional k8s.io.api.core.v1.ObjectReference templateInstance = 1; + + // secret is a reference to a Secret object residing in a namespace, + // containing the necessary template parameters. + optional k8s.io.api.core.v1.ObjectReference secret = 2; + + // bindingids is a list of 'binding_id's provided during successive bind + // calls to the template service broker. + repeated string bindingIDs = 3; +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message ExtraValue { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// Parameter defines a name/value variable that is to be processed during +// the Template to Config transformation. +message Parameter { + // Name must be set and it can be referenced in Template + // Items using ${PARAMETER_NAME}. Required. + optional string name = 1; + + // Optional: The name that will show in UI instead of parameter 'Name' + optional string displayName = 2; + + // Description of a parameter. Optional. + optional string description = 3; + + // Value holds the Parameter data. If specified, the generator will be + // ignored. The value replaces all occurrences of the Parameter ${Name} + // expression during the Template to Config transformation. Optional. + optional string value = 4; + + // generate specifies the generator to be used to generate random string + // from an input value specified by From field. The result string is + // stored into Value field. 
If empty, no generator is being used, leaving + // the result Value untouched. Optional. + // + // The only supported generator is "expression", which accepts a "from" + // value in the form of a simple regular expression containing the + // range expression "[a-zA-Z0-9]", and the length expression "a{length}". + // + // Examples: + // + // from | value + // ----------------------------- + // "test[0-9]{1}x" | "test7x" + // "[0-1]{8}" | "01001100" + // "0x[A-F0-9]{4}" | "0xB3AF" + // "[a-zA-Z0-9]{8}" | "hW4yQU5i" + optional string generate = 5; + + // From is an input value for the generator. Optional. + optional string from = 6; + + // Optional: Indicates the parameter must have a value. Defaults to false. + optional bool required = 7; +} + +// Template contains the inputs needed to produce a Config. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message Template { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // message is an optional instructional message that will + // be displayed when this template is instantiated. + // This field should inform the user how to utilize the newly created resources. + // Parameter substitution will be performed on the message before being + // displayed so that generated credentials and other parameters can be + // included in the output. + optional string message = 2; + + // objects is an array of resources to include in this template. + // If a namespace value is hardcoded in the object, it will be removed + // during template instantiation, however if the namespace value + // is, or contains, a ${PARAMETER_REFERENCE}, the resolved + // value after parameter substitution will be respected and the object + // will be created in that namespace. + // +kubebuilder:pruning:PreserveUnknownFields + repeated k8s.io.apimachinery.pkg.runtime.RawExtension objects = 3; + + // parameters is an optional array of Parameters used during the + // Template to Config transformation. + repeated Parameter parameters = 4; + + // labels is an optional set of labels that are applied to every + // object during the Template to Config transformation. + map<string, string> labels = 5; +} + +// TemplateInstance requests and records the instantiation of a Template. +// TemplateInstance is part of an experimental API. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message TemplateInstance { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec describes the desired state of this TemplateInstance. + optional TemplateInstanceSpec spec = 2; + + // status describes the current state of this TemplateInstance. + // +optional + optional TemplateInstanceStatus status = 3; +} + +// TemplateInstanceCondition contains condition information for a +// TemplateInstance. +message TemplateInstanceCondition { + // Type of the condition, currently Ready or InstantiateFailure. + optional string type = 1; + + // Status of the condition, one of True, False or Unknown.
+ optional string status = 2; + + // LastTransitionTime is the last time a condition status transitioned from + // one state to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // Reason is a brief machine readable explanation for the condition's last + // transition. + optional string reason = 4; + + // Message is a human readable description of the details of the last + // transition, complementing reason. + optional string message = 5; +} + +// TemplateInstanceList is a list of TemplateInstance objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message TemplateInstanceList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of TemplateInstances + repeated TemplateInstance items = 2; +} + +// TemplateInstanceObject references an object created by a TemplateInstance. +message TemplateInstanceObject { + // ref is a reference to the created object. When used under .spec, only + // name and namespace are used; these can contain references to parameters + // which will be substituted following the usual rules. + optional k8s.io.api.core.v1.ObjectReference ref = 1; +} + +// TemplateInstanceRequester holds the identity of an agent requesting a +// template instantiation. +message TemplateInstanceRequester { + // username uniquely identifies this user among all active users. + optional string username = 1; + + // uid is a unique value that identifies this user across time; if this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + optional string uid = 2; + + // groups represent the groups this user is a part of. + repeated string groups = 3; + + // extra holds additional information provided by the authenticator. + map<string, ExtraValue> extra = 4; +} + +// TemplateInstanceSpec describes the desired state of a TemplateInstance. +message TemplateInstanceSpec { + // template is a full copy of the template for instantiation. + optional Template template = 1; + + // secret is a reference to a Secret object containing the necessary + // template parameters. + optional k8s.io.api.core.v1.LocalObjectReference secret = 2; + + // requester holds the identity of the agent requesting the template + // instantiation. + // +optional + optional TemplateInstanceRequester requester = 3; +} + +// TemplateInstanceStatus describes the current state of a TemplateInstance. +message TemplateInstanceStatus { + // conditions represent the latest available observations of a + // TemplateInstance's current state. + repeated TemplateInstanceCondition conditions = 1; + + // Objects references the objects created by the TemplateInstance. + repeated TemplateInstanceObject objects = 2; +} + +// TemplateList is a list of Template objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message TemplateList { + // metadata is the standard list's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of templates + repeated Template items = 2; +} + diff --git a/vendor/github.com/openshift/api/template/v1/legacy.go b/vendor/github.com/openshift/api/template/v1/legacy.go new file mode 100644 index 000000000..9266f3ac9 --- /dev/null +++ b/vendor/github.com/openshift/api/template/v1/legacy.go @@ -0,0 +1,24 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &Template{}, + &TemplateList{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) + scheme.AddKnownTypeWithName(legacyGroupVersion.WithKind("TemplateConfig"), &Template{}) + scheme.AddKnownTypeWithName(legacyGroupVersion.WithKind("ProcessedTemplate"), &Template{}) + return nil +} diff --git a/vendor/github.com/openshift/api/template/v1/register.go b/vendor/github.com/openshift/api/template/v1/register.go new file mode 100644 index 000000000..e34ff5610 --- /dev/null +++ b/vendor/github.com/openshift/api/template/v1/register.go @@ -0,0 +1,43 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "template.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &Template{}, + &TemplateList{}, + &TemplateInstance{}, + &TemplateInstanceList{}, + &BrokerTemplateInstance{}, + &BrokerTemplateInstanceList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/template/v1/types.go b/vendor/github.com/openshift/api/template/v1/types.go new file mode 100644 index 000000000..9d95912b2 --- /dev/null +++ b/vendor/github.com/openshift/api/template/v1/types.go @@ -0,0 +1,294 @@ +package v1 + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Template contains the inputs needed to produce a Config. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=1 +type Template struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // message is an optional instructional message that will + // be displayed when this template is instantiated. + // This field should inform the user how to utilize the newly created resources. + // Parameter substitution will be performed on the message before being + // displayed so that generated credentials and other parameters can be + // included in the output. + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` + + // objects is an array of resources to include in this template. + // If a namespace value is hardcoded in the object, it will be removed + // during template instantiation, however if the namespace value + // is, or contains, a ${PARAMETER_REFERENCE}, the resolved + // value after parameter substitution will be respected and the object + // will be created in that namespace. + // +kubebuilder:pruning:PreserveUnknownFields + Objects []runtime.RawExtension `json:"objects" protobuf:"bytes,3,rep,name=objects"` + + // parameters is an optional array of Parameters used during the + // Template to Config transformation. + Parameters []Parameter `json:"parameters,omitempty" protobuf:"bytes,4,rep,name=parameters"` + + // labels is an optional set of labels that are applied to every + // object during the Template to Config transformation. + ObjectLabels map[string]string `json:"labels,omitempty" protobuf:"bytes,5,rep,name=labels"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// TemplateList is a list of Template objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type TemplateList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of templates + Items []Template `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// Parameter defines a name/value variable that is to be processed during +// the Template to Config transformation. +type Parameter struct { + // Name must be set and it can be referenced in Template + // Items using ${PARAMETER_NAME}. Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // Optional: The name that will show in UI instead of parameter 'Name' + DisplayName string `json:"displayName,omitempty" protobuf:"bytes,2,opt,name=displayName"` + + // Description of a parameter. Optional. + Description string `json:"description,omitempty" protobuf:"bytes,3,opt,name=description"` + + // Value holds the Parameter data. If specified, the generator will be + // ignored. The value replaces all occurrences of the Parameter ${Name} + // expression during the Template to Config transformation. Optional. + Value string `json:"value,omitempty" protobuf:"bytes,4,opt,name=value"` + + // generate specifies the generator to be used to generate random string + // from an input value specified by From field. The result string is + // stored into Value field.
If empty, no generator is being used, leaving + // the result Value untouched. Optional. + // + // The only supported generator is "expression", which accepts a "from" + // value in the form of a simple regular expression containing the + // range expression "[a-zA-Z0-9]", and the length expression "a{length}". + // + // Examples: + // + // from | value + // ----------------------------- + // "test[0-9]{1}x" | "test7x" + // "[0-1]{8}" | "01001100" + // "0x[A-F0-9]{4}" | "0xB3AF" + // "[a-zA-Z0-9]{8}" | "hW4yQU5i" + // + Generate string `json:"generate,omitempty" protobuf:"bytes,5,opt,name=generate"` + + // From is an input value for the generator. Optional. + From string `json:"from,omitempty" protobuf:"bytes,6,opt,name=from"` + + // Optional: Indicates the parameter must have a value. Defaults to false. + Required bool `json:"required,omitempty" protobuf:"varint,7,opt,name=required"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// TemplateInstance requests and records the instantiation of a Template. +// TemplateInstance is part of an experimental API. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type TemplateInstance struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec describes the desired state of this TemplateInstance. + Spec TemplateInstanceSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // status describes the current state of this TemplateInstance. + // +optional + Status TemplateInstanceStatus `json:"status" protobuf:"bytes,3,opt,name=status"` +} + +// TemplateInstanceSpec describes the desired state of a TemplateInstance. +type TemplateInstanceSpec struct { + // template is a full copy of the template for instantiation. + Template Template `json:"template" protobuf:"bytes,1,opt,name=template"` + + // secret is a reference to a Secret object containing the necessary + // template parameters. + Secret *corev1.LocalObjectReference `json:"secret,omitempty" protobuf:"bytes,2,opt,name=secret"` + + // requester holds the identity of the agent requesting the template + // instantiation. + // +optional + Requester *TemplateInstanceRequester `json:"requester" protobuf:"bytes,3,opt,name=requester"` +} + +// TemplateInstanceRequester holds the identity of an agent requesting a +// template instantiation. +type TemplateInstanceRequester struct { + // username uniquely identifies this user among all active users. + Username string `json:"username,omitempty" protobuf:"bytes,1,opt,name=username"` + + // uid is a unique value that identifies this user across time; if this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + UID string `json:"uid,omitempty" protobuf:"bytes,2,opt,name=uid"` + + // groups represent the groups this user is a part of. + Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"` + + // extra holds additional information provided by the authenticator. 
+ Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,4,rep,name=extra"` +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type ExtraValue []string + +func (t ExtraValue) String() string { + return fmt.Sprintf("%v", []string(t)) +} + +// TemplateInstanceStatus describes the current state of a TemplateInstance. +type TemplateInstanceStatus struct { + // conditions represent the latest available observations of a + // TemplateInstance's current state. + Conditions []TemplateInstanceCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` + + // Objects references the objects created by the TemplateInstance. + Objects []TemplateInstanceObject `json:"objects,omitempty" protobuf:"bytes,2,rep,name=objects"` +} + +// TemplateInstanceCondition contains condition information for a +// TemplateInstance. +type TemplateInstanceCondition struct { + // Type of the condition, currently Ready or InstantiateFailure. + Type TemplateInstanceConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=TemplateInstanceConditionType"` + // Status of the condition, one of True, False or Unknown. + Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status"` + // LastTransitionTime is the last time a condition status transitioned from + // one state to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // Reason is a brief machine readable explanation for the condition's last + // transition. + Reason string `json:"reason" protobuf:"bytes,4,opt,name=reason"` + // Message is a human readable description of the details of the last + // transition, complementing reason. + Message string `json:"message" protobuf:"bytes,5,opt,name=message"` +} + +// TemplateInstanceConditionType is the type of condition pertaining to a +// TemplateInstance. +type TemplateInstanceConditionType string + +const ( + // TemplateInstanceReady indicates the readiness of the template + // instantiation. + TemplateInstanceReady TemplateInstanceConditionType = "Ready" + // TemplateInstanceInstantiateFailure indicates the failure of the template + // instantiation + TemplateInstanceInstantiateFailure TemplateInstanceConditionType = "InstantiateFailure" +) + +// TemplateInstanceObject references an object created by a TemplateInstance. +type TemplateInstanceObject struct { + // ref is a reference to the created object. When used under .spec, only + // name and namespace are used; these can contain references to parameters + // which will be substituted following the usual rules. + Ref corev1.ObjectReference `json:"ref,omitempty" protobuf:"bytes,1,opt,name=ref"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// TemplateInstanceList is a list of TemplateInstance objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type TemplateInstanceList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of TemplateInstances + Items []TemplateInstance `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BrokerTemplateInstance holds the service broker-related state associated with +// a TemplateInstance. BrokerTemplateInstance is part of an experimental API. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type BrokerTemplateInstance struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec describes the state of this BrokerTemplateInstance. + Spec BrokerTemplateInstanceSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +// BrokerTemplateInstanceSpec describes the state of a BrokerTemplateInstance. +type BrokerTemplateInstanceSpec struct { + // templateinstance is a reference to a TemplateInstance object residing + // in a namespace. + TemplateInstance corev1.ObjectReference `json:"templateInstance" protobuf:"bytes,1,opt,name=templateInstance"` + + // secret is a reference to a Secret object residing in a namespace, + // containing the necessary template parameters. + Secret corev1.ObjectReference `json:"secret" protobuf:"bytes,2,opt,name=secret"` + + // bindingids is a list of 'binding_id's provided during successive bind + // calls to the template service broker. + BindingIDs []string `json:"bindingIDs,omitempty" protobuf:"bytes,3,rep,name=bindingIDs"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// BrokerTemplateInstanceList is a list of BrokerTemplateInstance objects. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type BrokerTemplateInstanceList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of BrokerTemplateInstances + Items []BrokerTemplateInstance `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/github.com/openshift/api/template/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/template/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..ff14f246b --- /dev/null +++ b/vendor/github.com/openshift/api/template/v1/zz_generated.deepcopy.go @@ -0,0 +1,394 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
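Every DeepCopyInto in this file, including the BrokerTemplateInstance one directly below, follows the same recipe: a shallow struct copy (*out = *in) followed by explicit re-copies of every reference-typed field (slices, maps, pointers), so the result shares no memory with the receiver. A minimal hand-written sketch of that recipe, using a hypothetical Widget type rather than the vendored API types:

package main

import "fmt"

// Widget is a hypothetical example type; only Tags needs deep copying.
type Widget struct {
	Name string
	Tags []string
}

// DeepCopyInto mirrors the deepcopy-gen pattern: shallow copy first,
// then replace aliased reference fields with fresh copies.
func (in *Widget) DeepCopyInto(out *Widget) {
	*out = *in // copies Name, but out.Tags still aliases in.Tags
	if in.Tags != nil {
		out.Tags = make([]string, len(in.Tags))
		copy(out.Tags, in.Tags)
	}
}

func main() {
	a := Widget{Name: "a", Tags: []string{"x"}}
	var b Widget
	a.DeepCopyInto(&b)
	b.Tags[0] = "y"
	fmt.Println(a.Tags[0]) // still "x": the copy is independent
}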
+func (in *BrokerTemplateInstance) DeepCopyInto(out *BrokerTemplateInstance) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerTemplateInstance. +func (in *BrokerTemplateInstance) DeepCopy() *BrokerTemplateInstance { + if in == nil { + return nil + } + out := new(BrokerTemplateInstance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BrokerTemplateInstance) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BrokerTemplateInstanceList) DeepCopyInto(out *BrokerTemplateInstanceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]BrokerTemplateInstance, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerTemplateInstanceList. +func (in *BrokerTemplateInstanceList) DeepCopy() *BrokerTemplateInstanceList { + if in == nil { + return nil + } + out := new(BrokerTemplateInstanceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *BrokerTemplateInstanceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BrokerTemplateInstanceSpec) DeepCopyInto(out *BrokerTemplateInstanceSpec) { + *out = *in + out.TemplateInstance = in.TemplateInstance + out.Secret = in.Secret + if in.BindingIDs != nil { + in, out := &in.BindingIDs, &out.BindingIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerTemplateInstanceSpec. +func (in *BrokerTemplateInstanceSpec) DeepCopy() *BrokerTemplateInstanceSpec { + if in == nil { + return nil + } + out := new(BrokerTemplateInstanceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ExtraValue) DeepCopyInto(out *ExtraValue) { + { + in := &in + *out = make(ExtraValue, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue. +func (in ExtraValue) DeepCopy() ExtraValue { + if in == nil { + return nil + } + out := new(ExtraValue) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Parameter) DeepCopyInto(out *Parameter) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Parameter. 
+func (in *Parameter) DeepCopy() *Parameter { + if in == nil { + return nil + } + out := new(Parameter) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Template) DeepCopyInto(out *Template) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Objects != nil { + in, out := &in.Objects, &out.Objects + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make([]Parameter, len(*in)) + copy(*out, *in) + } + if in.ObjectLabels != nil { + in, out := &in.ObjectLabels, &out.ObjectLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Template. +func (in *Template) DeepCopy() *Template { + if in == nil { + return nil + } + out := new(Template) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Template) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateInstance) DeepCopyInto(out *TemplateInstance) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstance. +func (in *TemplateInstance) DeepCopy() *TemplateInstance { + if in == nil { + return nil + } + out := new(TemplateInstance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TemplateInstance) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateInstanceCondition) DeepCopyInto(out *TemplateInstanceCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceCondition. +func (in *TemplateInstanceCondition) DeepCopy() *TemplateInstanceCondition { + if in == nil { + return nil + } + out := new(TemplateInstanceCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateInstanceList) DeepCopyInto(out *TemplateInstanceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TemplateInstance, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceList. 
+func (in *TemplateInstanceList) DeepCopy() *TemplateInstanceList { + if in == nil { + return nil + } + out := new(TemplateInstanceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TemplateInstanceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateInstanceObject) DeepCopyInto(out *TemplateInstanceObject) { + *out = *in + out.Ref = in.Ref + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceObject. +func (in *TemplateInstanceObject) DeepCopy() *TemplateInstanceObject { + if in == nil { + return nil + } + out := new(TemplateInstanceObject) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateInstanceRequester) DeepCopyInto(out *TemplateInstanceRequester) { + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(ExtraValue, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceRequester. +func (in *TemplateInstanceRequester) DeepCopy() *TemplateInstanceRequester { + if in == nil { + return nil + } + out := new(TemplateInstanceRequester) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateInstanceSpec) DeepCopyInto(out *TemplateInstanceSpec) { + *out = *in + in.Template.DeepCopyInto(&out.Template) + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(corev1.LocalObjectReference) + **out = **in + } + if in.Requester != nil { + in, out := &in.Requester, &out.Requester + *out = new(TemplateInstanceRequester) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceSpec. +func (in *TemplateInstanceSpec) DeepCopy() *TemplateInstanceSpec { + if in == nil { + return nil + } + out := new(TemplateInstanceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateInstanceStatus) DeepCopyInto(out *TemplateInstanceStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]TemplateInstanceCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Objects != nil { + in, out := &in.Objects, &out.Objects + *out = make([]TemplateInstanceObject, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateInstanceStatus. 
+func (in *TemplateInstanceStatus) DeepCopy() *TemplateInstanceStatus { + if in == nil { + return nil + } + out := new(TemplateInstanceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateList) DeepCopyInto(out *TemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Template, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateList. +func (in *TemplateList) DeepCopy() *TemplateList { + if in == nil { + return nil + } + out := new(TemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/github.com/openshift/api/template/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/template/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..8ed3822c8 --- /dev/null +++ b/vendor/github.com/openshift/api/template/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,159 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_BrokerTemplateInstance = map[string]string{ + "": "BrokerTemplateInstance holds the service broker-related state associated with a TemplateInstance. BrokerTemplateInstance is part of an experimental API.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec describes the state of this BrokerTemplateInstance.", +} + +func (BrokerTemplateInstance) SwaggerDoc() map[string]string { + return map_BrokerTemplateInstance +} + +var map_BrokerTemplateInstanceList = map[string]string{ + "": "BrokerTemplateInstanceList is a list of BrokerTemplateInstance objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of BrokerTemplateInstances", +} + +func (BrokerTemplateInstanceList) SwaggerDoc() map[string]string { + return map_BrokerTemplateInstanceList +} + +var map_BrokerTemplateInstanceSpec = map[string]string{ + "": "BrokerTemplateInstanceSpec describes the state of a BrokerTemplateInstance.", + "templateInstance": "templateinstance is a reference to a TemplateInstance object residing in a namespace.", + "secret": "secret is a reference to a Secret object residing in a namespace, containing the necessary template parameters.", + "bindingIDs": "bindingids is a list of 'binding_id's provided during successive bind calls to the template service broker.", +} + +func (BrokerTemplateInstanceSpec) SwaggerDoc() map[string]string { + return map_BrokerTemplateInstanceSpec +} + +var map_Parameter = map[string]string{ + "": "Parameter defines a name/value variable that is to be processed during the Template to Config transformation.", + "name": "Name must be set and it can be referenced in Template Items using ${PARAMETER_NAME}. Required.", + "displayName": "Optional: The name that will show in UI instead of parameter 'Name'", + "description": "Description of a parameter. Optional.", + "value": "Value holds the Parameter data. If specified, the generator will be ignored. The value replaces all occurrences of the Parameter ${Name} expression during the Template to Config transformation. Optional.", + "generate": "generate specifies the generator to be used to generate random string from an input value specified by From field. The result string is stored into Value field. If empty, no generator is being used, leaving the result Value untouched. Optional.\n\nThe only supported generator is \"expression\", which accepts a \"from\" value in the form of a simple regular expression containing the range expression \"[a-zA-Z0-9]\", and the length expression \"a{length}\".\n\nExamples:\n\nfrom | value", + "from": "From is an input value for the generator. Optional.", + "required": "Optional: Indicates the parameter must have a value. Defaults to false.", +} + +func (Parameter) SwaggerDoc() map[string]string { + return map_Parameter +} + +var map_Template = map[string]string{ + "": "Template contains the inputs needed to produce a Config.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "message": "message is an optional instructional message that will be displayed when this template is instantiated. This field should inform the user how to utilize the newly created resources. Parameter substitution will be performed on the message before being displayed so that generated credentials and other parameters can be included in the output.", + "objects": "objects is an array of resources to include in this template. 
If a namespace value is hardcoded in the object, it will be removed during template instantiation, however if the namespace value is, or contains, a ${PARAMETER_REFERENCE}, the resolved value after parameter substitution will be respected and the object will be created in that namespace.", + "parameters": "parameters is an optional array of Parameters used during the Template to Config transformation.", + "labels": "labels is an optional set of labels that are applied to every object during the Template to Config transformation.", +} + +func (Template) SwaggerDoc() map[string]string { + return map_Template +} + +var map_TemplateInstance = map[string]string{ + "": "TemplateInstance requests and records the instantiation of a Template. TemplateInstance is part of an experimental API.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec describes the desired state of this TemplateInstance.", + "status": "status describes the current state of this TemplateInstance.", +} + +func (TemplateInstance) SwaggerDoc() map[string]string { + return map_TemplateInstance +} + +var map_TemplateInstanceCondition = map[string]string{ + "": "TemplateInstanceCondition contains condition information for a TemplateInstance.", + "type": "Type of the condition, currently Ready or InstantiateFailure.", + "status": "Status of the condition, one of True, False or Unknown.", + "lastTransitionTime": "LastTransitionTime is the last time a condition status transitioned from one state to another.", + "reason": "Reason is a brief machine readable explanation for the condition's last transition.", + "message": "Message is a human readable description of the details of the last transition, complementing reason.", +} + +func (TemplateInstanceCondition) SwaggerDoc() map[string]string { + return map_TemplateInstanceCondition +} + +var map_TemplateInstanceList = map[string]string{ + "": "TemplateInstanceList is a list of TemplateInstance objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of TemplateInstances", +} + +func (TemplateInstanceList) SwaggerDoc() map[string]string { + return map_TemplateInstanceList +} + +var map_TemplateInstanceObject = map[string]string{ + "": "TemplateInstanceObject references an object created by a TemplateInstance.", + "ref": "ref is a reference to the created object.
When used under .spec, only name and namespace are used; these can contain references to parameters which will be substituted following the usual rules.", +} + +func (TemplateInstanceObject) SwaggerDoc() map[string]string { + return map_TemplateInstanceObject +} + +var map_TemplateInstanceRequester = map[string]string{ + "": "TemplateInstanceRequester holds the identity of an agent requesting a template instantiation.", + "username": "username uniquely identifies this user among all active users.", + "uid": "uid is a unique value that identifies this user across time; if this user is deleted and another user by the same name is added, they will have different UIDs.", + "groups": "groups represent the groups this user is a part of.", + "extra": "extra holds additional information provided by the authenticator.", +} + +func (TemplateInstanceRequester) SwaggerDoc() map[string]string { + return map_TemplateInstanceRequester +} + +var map_TemplateInstanceSpec = map[string]string{ + "": "TemplateInstanceSpec describes the desired state of a TemplateInstance.", + "template": "template is a full copy of the template for instantiation.", + "secret": "secret is a reference to a Secret object containing the necessary template parameters.", + "requester": "requester holds the identity of the agent requesting the template instantiation.", +} + +func (TemplateInstanceSpec) SwaggerDoc() map[string]string { + return map_TemplateInstanceSpec +} + +var map_TemplateInstanceStatus = map[string]string{ + "": "TemplateInstanceStatus describes the current state of a TemplateInstance.", + "conditions": "conditions represent the latest available observations of a TemplateInstance's current state.", + "objects": "Objects references the objects created by the TemplateInstance.", +} + +func (TemplateInstanceStatus) SwaggerDoc() map[string]string { + return map_TemplateInstanceStatus +} + +var map_TemplateList = map[string]string{ + "": "TemplateList is a list of Template objects.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is a list of templates", +} + +func (TemplateList) SwaggerDoc() map[string]string { + return map_TemplateList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/api/user/install.go b/vendor/github.com/openshift/api/user/install.go new file mode 100644 index 000000000..28d498062 --- /dev/null +++ b/vendor/github.com/openshift/api/user/install.go @@ -0,0 +1,26 @@ +package user + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + + userv1 "github.com/openshift/api/user/v1" +) + +const ( + GroupName = "user.openshift.io" +) + +var ( + schemeBuilder = runtime.NewSchemeBuilder(userv1.Install) + // Install is a function which adds every version of this group to a scheme + Install = schemeBuilder.AddToScheme +) + +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +func Kind(kind string) schema.GroupKind { + return schema.GroupKind{Group: GroupName, Kind: kind} +} diff --git a/vendor/github.com/openshift/api/user/v1/doc.go b/vendor/github.com/openshift/api/user/v1/doc.go new file mode 100644 index 000000000..42287095e --- /dev/null +++ b/vendor/github.com/openshift/api/user/v1/doc.go @@ -0,0 +1,8 @@ +// +k8s:deepcopy-gen=package,register +// +k8s:conversion-gen=github.com/openshift/origin/pkg/user/apis/user +// +k8s:defaulter-gen=TypeMeta +// +k8s:openapi-gen=true + +// +groupName=user.openshift.io +// Package v1 is the v1 version of the API. +package v1 diff --git a/vendor/github.com/openshift/api/user/v1/generated.pb.go b/vendor/github.com/openshift/api/user/v1/generated.pb.go new file mode 100644 index 000000000..0689ed389 --- /dev/null +++ b/vendor/github.com/openshift/api/user/v1/generated.pb.go @@ -0,0 +1,2274 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/openshift/api/user/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
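The Install helpers established above (user/install.go here, and template/v1/register.go earlier in this patch) are what callers use to register these groups with a runtime.Scheme; install.go relies on userv1.Install being exported by the user/v1 register.go, which is not shown in this hunk. A brief usage sketch, illustrative rather than part of the patch:

package main

import (
	templatev1 "github.com/openshift/api/template/v1"
	userv1 "github.com/openshift/api/user/v1"
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

// newScheme wires the vendored groups into a fresh runtime.Scheme so
// codecs and clients can recognize Template and User kinds.
func newScheme() *runtime.Scheme {
	scheme := runtime.NewScheme()
	// Each Install adds that group's kinds to the scheme; Must panics
	// on registration errors, which can only stem from programmer error.
	utilruntime.Must(templatev1.Install(scheme))
	utilruntime.Must(userv1.Install(scheme))
	return scheme
}

func main() { _ = newScheme() }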
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *Group) Reset() { *m = Group{} } +func (*Group) ProtoMessage() {} +func (*Group) Descriptor() ([]byte, []int) { + return fileDescriptor_ea159b02d89a1362, []int{0} +} +func (m *Group) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Group) XXX_Merge(src proto.Message) { + xxx_messageInfo_Group.Merge(m, src) +} +func (m *Group) XXX_Size() int { + return m.Size() +} +func (m *Group) XXX_DiscardUnknown() { + xxx_messageInfo_Group.DiscardUnknown(m) +} + +var xxx_messageInfo_Group proto.InternalMessageInfo + +func (m *GroupList) Reset() { *m = GroupList{} } +func (*GroupList) ProtoMessage() {} +func (*GroupList) Descriptor() ([]byte, []int) { + return fileDescriptor_ea159b02d89a1362, []int{1} +} +func (m *GroupList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupList) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupList.Merge(m, src) +} +func (m *GroupList) XXX_Size() int { + return m.Size() +} +func (m *GroupList) XXX_DiscardUnknown() { + xxx_messageInfo_GroupList.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupList proto.InternalMessageInfo + +func (m *Identity) Reset() { *m = Identity{} } +func (*Identity) ProtoMessage() {} +func (*Identity) Descriptor() ([]byte, []int) { + return fileDescriptor_ea159b02d89a1362, []int{2} +} +func (m *Identity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Identity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Identity) XXX_Merge(src proto.Message) { + xxx_messageInfo_Identity.Merge(m, src) +} +func (m *Identity) XXX_Size() int { + return m.Size() +} +func (m *Identity) XXX_DiscardUnknown() { + xxx_messageInfo_Identity.DiscardUnknown(m) +} + +var xxx_messageInfo_Identity proto.InternalMessageInfo + +func (m *IdentityList) Reset() { *m = IdentityList{} } +func (*IdentityList) ProtoMessage() {} +func (*IdentityList) Descriptor() ([]byte, []int) { + return fileDescriptor_ea159b02d89a1362, []int{3} +} +func (m *IdentityList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IdentityList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IdentityList) XXX_Merge(src proto.Message) { + xxx_messageInfo_IdentityList.Merge(m, src) +} +func (m *IdentityList) XXX_Size() int { + return m.Size() +} +func (m *IdentityList) XXX_DiscardUnknown() { + xxx_messageInfo_IdentityList.DiscardUnknown(m) +} + +var xxx_messageInfo_IdentityList proto.InternalMessageInfo + +func (m *OptionalNames) Reset() { *m = OptionalNames{} } +func (*OptionalNames) ProtoMessage() {} +func (*OptionalNames) Descriptor() ([]byte, []int) { + return fileDescriptor_ea159b02d89a1362, []int{4} +} +func (m *OptionalNames) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OptionalNames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { 
+ b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OptionalNames) XXX_Merge(src proto.Message) { + xxx_messageInfo_OptionalNames.Merge(m, src) +} +func (m *OptionalNames) XXX_Size() int { + return m.Size() +} +func (m *OptionalNames) XXX_DiscardUnknown() { + xxx_messageInfo_OptionalNames.DiscardUnknown(m) +} + +var xxx_messageInfo_OptionalNames proto.InternalMessageInfo + +func (m *User) Reset() { *m = User{} } +func (*User) ProtoMessage() {} +func (*User) Descriptor() ([]byte, []int) { + return fileDescriptor_ea159b02d89a1362, []int{5} +} +func (m *User) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *User) XXX_Merge(src proto.Message) { + xxx_messageInfo_User.Merge(m, src) +} +func (m *User) XXX_Size() int { + return m.Size() +} +func (m *User) XXX_DiscardUnknown() { + xxx_messageInfo_User.DiscardUnknown(m) +} + +var xxx_messageInfo_User proto.InternalMessageInfo + +func (m *UserIdentityMapping) Reset() { *m = UserIdentityMapping{} } +func (*UserIdentityMapping) ProtoMessage() {} +func (*UserIdentityMapping) Descriptor() ([]byte, []int) { + return fileDescriptor_ea159b02d89a1362, []int{6} +} +func (m *UserIdentityMapping) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserIdentityMapping) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserIdentityMapping) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserIdentityMapping.Merge(m, src) +} +func (m *UserIdentityMapping) XXX_Size() int { + return m.Size() +} +func (m *UserIdentityMapping) XXX_DiscardUnknown() { + xxx_messageInfo_UserIdentityMapping.DiscardUnknown(m) +} + +var xxx_messageInfo_UserIdentityMapping proto.InternalMessageInfo + +func (m *UserList) Reset() { *m = UserList{} } +func (*UserList) ProtoMessage() {} +func (*UserList) Descriptor() ([]byte, []int) { + return fileDescriptor_ea159b02d89a1362, []int{7} +} +func (m *UserList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserList) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserList.Merge(m, src) +} +func (m *UserList) XXX_Size() int { + return m.Size() +} +func (m *UserList) XXX_DiscardUnknown() { + xxx_messageInfo_UserList.DiscardUnknown(m) +} + +var xxx_messageInfo_UserList proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Group)(nil), "github.com.openshift.api.user.v1.Group") + proto.RegisterType((*GroupList)(nil), "github.com.openshift.api.user.v1.GroupList") + proto.RegisterType((*Identity)(nil), "github.com.openshift.api.user.v1.Identity") + proto.RegisterMapType((map[string]string)(nil), "github.com.openshift.api.user.v1.Identity.ExtraEntry") + proto.RegisterType((*IdentityList)(nil), "github.com.openshift.api.user.v1.IdentityList") + proto.RegisterType((*OptionalNames)(nil), "github.com.openshift.api.user.v1.OptionalNames") + proto.RegisterType((*User)(nil), "github.com.openshift.api.user.v1.User") + proto.RegisterType((*UserIdentityMapping)(nil), 
"github.com.openshift.api.user.v1.UserIdentityMapping") + proto.RegisterType((*UserList)(nil), "github.com.openshift.api.user.v1.UserList") +} + +func init() { + proto.RegisterFile("github.com/openshift/api/user/v1/generated.proto", fileDescriptor_ea159b02d89a1362) +} + +var fileDescriptor_ea159b02d89a1362 = []byte{ + // 726 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x3d, 0x6f, 0x13, 0x4b, + 0x14, 0xf5, 0xc4, 0xde, 0xc8, 0x9e, 0x38, 0x4f, 0xd6, 0xbe, 0x14, 0x2b, 0x17, 0x6b, 0x6b, 0x9f, + 0xf4, 0x88, 0x10, 0xcc, 0x26, 0x11, 0x20, 0x2b, 0xa5, 0x45, 0x82, 0x22, 0x12, 0x12, 0x46, 0xa2, + 0x89, 0x28, 0x98, 0xd8, 0xe3, 0xf5, 0x60, 0xef, 0x87, 0x76, 0x67, 0x2d, 0xdc, 0xe5, 0x27, 0x40, + 0x47, 0xc9, 0x9f, 0x40, 0x14, 0x88, 0x3e, 0x74, 0x29, 0x53, 0x20, 0x8b, 0x2c, 0x1d, 0xbf, 0x02, + 0xcd, 0xec, 0x87, 0xd7, 0xf9, 0x90, 0x23, 0x21, 0xb9, 0xdb, 0xb9, 0x73, 0xcf, 0x99, 0x73, 0xcf, + 0xbd, 0xd7, 0x32, 0xdc, 0xb0, 0x18, 0xef, 0x87, 0x27, 0xa8, 0xe3, 0xda, 0xa6, 0xeb, 0x51, 0x27, + 0xe8, 0xb3, 0x1e, 0x37, 0x89, 0xc7, 0xcc, 0x30, 0xa0, 0xbe, 0x39, 0xda, 0x34, 0x2d, 0xea, 0x50, + 0x9f, 0x70, 0xda, 0x45, 0x9e, 0xef, 0x72, 0x57, 0x6d, 0x4e, 0x11, 0x28, 0x43, 0x20, 0xe2, 0x31, + 0x24, 0x10, 0x68, 0xb4, 0x59, 0x7f, 0x98, 0xe3, 0xb4, 0x5c, 0xcb, 0x35, 0x25, 0xf0, 0x24, 0xec, + 0xc9, 0x93, 0x3c, 0xc8, 0xaf, 0x98, 0xb0, 0x6e, 0x0c, 0x5a, 0x01, 0x62, 0xae, 0x7c, 0xb4, 0xe3, + 0xfa, 0xf4, 0x86, 0x47, 0xeb, 0x8f, 0xa6, 0x39, 0x36, 0xe9, 0xf4, 0x99, 0x43, 0xfd, 0xb1, 0xe9, + 0x0d, 0x2c, 0x11, 0x08, 0x4c, 0x9b, 0x72, 0x72, 0x13, 0xea, 0xc9, 0x6d, 0x28, 0x3f, 0x74, 0x38, + 0xb3, 0xa9, 0x19, 0x74, 0xfa, 0xd4, 0x26, 0x57, 0x71, 0xc6, 0x57, 0x00, 0x95, 0x67, 0xbe, 0x1b, + 0x7a, 0xea, 0x1b, 0x58, 0x16, 0xe4, 0x5d, 0xc2, 0x89, 0x06, 0x9a, 0x60, 0x7d, 0x65, 0x6b, 0x03, + 0xc5, 0xa4, 0x28, 0x4f, 0x8a, 0xbc, 0x81, 0x25, 0x02, 0x01, 0x12, 0xd9, 0x68, 0xb4, 0x89, 0x0e, + 0x4f, 0xde, 0xd2, 0x0e, 0x3f, 0xa0, 0x9c, 0xb4, 0xd5, 0xb3, 0x49, 0xa3, 0x10, 0x4d, 0x1a, 0x70, + 0x1a, 0xc3, 0x19, 0xab, 0x7a, 0x04, 0x15, 0xe1, 0x5b, 0xa0, 0x2d, 0x49, 0x7a, 0x13, 0xcd, 0xb3, + 0x17, 0x1d, 0x7a, 0x9c, 0xb9, 0x0e, 0x19, 0xbe, 0x20, 0x36, 0x0d, 0xda, 0x95, 0x68, 0xd2, 0x50, + 0x5e, 0x09, 0x06, 0x1c, 0x13, 0x19, 0x5f, 0x00, 0xac, 0x48, 0xf5, 0xfb, 0x2c, 0xe0, 0xea, 0xeb, + 0x6b, 0x15, 0xa0, 0xbb, 0x55, 0x20, 0xd0, 0x52, 0x7f, 0x2d, 0xd1, 0x5f, 0x4e, 0x23, 0x39, 0xf5, + 0xfb, 0x50, 0x61, 0x9c, 0xda, 0x42, 0x7d, 0x71, 0x7d, 0x65, 0xeb, 0xde, 0x7c, 0xf5, 0x52, 0x59, + 0x7b, 0x35, 0xe1, 0x54, 0xf6, 0x04, 0x1a, 0xc7, 0x24, 0xc6, 0xf7, 0x22, 0x2c, 0xef, 0x75, 0xa9, + 0xc3, 0x19, 0x1f, 0x2f, 0xc0, 0xfa, 0x16, 0xac, 0x7a, 0xbe, 0x3b, 0x62, 0x5d, 0xea, 0x0b, 0x2f, + 0x65, 0x07, 0x2a, 0xed, 0xb5, 0x04, 0x53, 0x3d, 0xca, 0xdd, 0xe1, 0x99, 0x4c, 0xf5, 0x29, 0xac, + 0xa5, 0x67, 0x61, 0xbd, 0x44, 0x17, 0x25, 0x5a, 0x4b, 0xd0, 0xb5, 0xa3, 0x2b, 0xf7, 0xf8, 0x1a, + 0x42, 0xdd, 0x81, 0x25, 0xe1, 0x8a, 0x56, 0x92, 0xd5, 0xfd, 0x97, 0xab, 0x0e, 0x89, 0x3d, 0x98, + 0xd6, 0x82, 0x69, 0x8f, 0xfa, 0xd4, 0xe9, 0xd0, 0x76, 0x35, 0xa1, 0x2f, 0x09, 0x12, 0x2c, 0xe1, + 0xea, 0x31, 0x54, 0xe8, 0x3b, 0xee, 0x13, 0x4d, 0x91, 0x3d, 0x78, 0x3c, 0xbf, 0x07, 0xa9, 0xc7, + 0x68, 0x47, 0xe0, 0x76, 0x1c, 0xee, 0x8f, 0xa7, 0x1d, 0x91, 0x31, 0x1c, 0x53, 0xd6, 0x5b, 0x10, + 0x4e, 0x73, 0xd4, 0x1a, 0x2c, 0x0e, 0xe8, 0x58, 0x76, 0xa3, 0x82, 0xc5, 0xa7, 0xba, 0x06, 0x95, + 0x11, 0x19, 0x86, 0x89, 0x77, 0x38, 0x3e, 0x6c, 0x2f, 0xb5, 0x80, 0xf1, 0x0d, 0xc0, 0x6a, 0xfa, + 0xce, 0x02, 0x06, 0xf1, 0x70, 0x76, 0x10, 0xef, 
0xdf, 0xdd, 0x84, 0x5b, 0x66, 0x71, 0x1b, 0xae, + 0xce, 0x2c, 0x9a, 0xda, 0x48, 0x5f, 0x00, 0xcd, 0xe2, 0x7a, 0x25, 0xde, 0xbb, 0x3c, 0x62, 0xbb, + 0xfc, 0xf1, 0x53, 0xa3, 0x70, 0xfa, 0xa3, 0x59, 0x30, 0x7e, 0x03, 0x28, 0x1b, 0xb4, 0x80, 0x19, + 0x7e, 0x00, 0xcb, 0xbd, 0x70, 0x38, 0xcc, 0xcd, 0x6f, 0xe6, 0xd2, 0x6e, 0x12, 0xc7, 0x59, 0x86, + 0x8a, 0x20, 0x64, 0x71, 0xd9, 0x8c, 0x06, 0x5a, 0x51, 0x16, 0xf2, 0x8f, 0xe0, 0xde, 0xcb, 0xa2, + 0x38, 0x97, 0xa1, 0x1a, 0x70, 0xd9, 0x12, 0xfb, 0x1a, 0x68, 0x25, 0x99, 0x0b, 0xa3, 0x49, 0x63, + 0x59, 0x6e, 0x70, 0x80, 0x93, 0x1b, 0xe3, 0xc3, 0x12, 0xfc, 0x57, 0x14, 0x9b, 0xfa, 0x79, 0x40, + 0x3c, 0x8f, 0x39, 0xd6, 0x02, 0x6a, 0x7f, 0x09, 0xcb, 0x89, 0xd6, 0x71, 0xf2, 0xeb, 0x79, 0xa7, + 0x1d, 0xca, 0x0c, 0x4a, 0x15, 0xe3, 0x8c, 0x26, 0x5b, 0xc9, 0xe2, 0x5f, 0xad, 0xa4, 0xf1, 0x19, + 0xc0, 0xb2, 0x38, 0x2e, 0x60, 0xf0, 0x9f, 0xcf, 0x0e, 0xfe, 0xff, 0xf3, 0x07, 0x5f, 0x08, 0xbb, + 0x79, 0xe8, 0xdb, 0xbb, 0x67, 0x97, 0x7a, 0xe1, 0xfc, 0x52, 0x2f, 0x5c, 0x5c, 0xea, 0x85, 0xd3, + 0x48, 0x07, 0x67, 0x91, 0x0e, 0xce, 0x23, 0x1d, 0x5c, 0x44, 0x3a, 0xf8, 0x19, 0xe9, 0xe0, 0xfd, + 0x2f, 0xbd, 0x70, 0xdc, 0x9c, 0xf7, 0x9f, 0xe1, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x45, 0x85, + 0x81, 0x86, 0x56, 0x08, 0x00, 0x00, +} + +func (m *Group) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Group) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Group) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Users != nil { + { + size, err := m.Users.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GroupList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Identity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Identity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Identity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + 
_ = i + var l int + _ = l + if len(m.Extra) > 0 { + keysForExtra := make([]string, 0, len(m.Extra)) + for k := range m.Extra { + keysForExtra = append(keysForExtra, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + i -= len(m.ProviderUserName) + copy(dAtA[i:], m.ProviderUserName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderUserName))) + i-- + dAtA[i] = 0x1a + i -= len(m.ProviderName) + copy(dAtA[i:], m.ProviderName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IdentityList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IdentityList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IdentityList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m OptionalNames) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m OptionalNames) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m OptionalNames) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m) > 0 { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *User) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *User) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *User) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l 
int + _ = l + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Identities) > 0 { + for iNdEx := len(m.Identities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Identities[iNdEx]) + copy(dAtA[i:], m.Identities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Identities[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.FullName) + copy(dAtA[i:], m.FullName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FullName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *UserIdentityMapping) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserIdentityMapping) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserIdentityMapping) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Identity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *UserList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Group) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Users != nil { + l = m.Users.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *GroupList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Identity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ProviderName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ProviderUserName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Extra) > 0 { + for k, v := range m.Extra { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *IdentityList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m OptionalNames) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *User) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FullName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Identities) > 0 { + for _, s := range m.Identities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *UserIdentityMapping) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Identity.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *UserList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Group) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Group{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Users:` + strings.Replace(fmt.Sprintf("%v", this.Users), "OptionalNames", "OptionalNames", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GroupList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Group{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Group", "Group", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&GroupList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *Identity) String() string { + if this == nil { + return "nil" + } + keysForExtra := make([]string, 0, 
len(this.Extra)) + for k := range this.Extra { + keysForExtra = append(keysForExtra, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + mapStringForExtra := "map[string]string{" + for _, k := range keysForExtra { + mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) + } + mapStringForExtra += "}" + s := strings.Join([]string{`&Identity{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ProviderName:` + fmt.Sprintf("%v", this.ProviderName) + `,`, + `ProviderUserName:` + fmt.Sprintf("%v", this.ProviderUserName) + `,`, + `User:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.User), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `Extra:` + mapStringForExtra + `,`, + `}`, + }, "") + return s +} +func (this *IdentityList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Identity{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Identity", "Identity", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&IdentityList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *User) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&User{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `FullName:` + fmt.Sprintf("%v", this.FullName) + `,`, + `Identities:` + fmt.Sprintf("%v", this.Identities) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `}`, + }, "") + return s +} +func (this *UserIdentityMapping) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UserIdentityMapping{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Identity:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Identity), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `User:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.User), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *UserList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]User{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "User", "User", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&UserList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Group) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 
0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Group: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Group: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Users == nil { + m.Users = OptionalNames{} + } + if err := m.Users.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Group{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Identity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Identity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Identity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProviderName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderUserName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProviderUserName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Extra == nil { + m.Extra = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + 
m.Extra[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IdentityList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IdentityList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IdentityList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Identity{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OptionalNames) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OptionalNames: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OptionalNames: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *User) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: User: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: User: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FullName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FullName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Identities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + 
if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Identities = append(m.Identities, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserIdentityMapping) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserIdentityMapping: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserIdentityMapping: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Identity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Identity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= 
l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, User{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + 
if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/github.com/openshift/api/user/v1/generated.proto b/vendor/github.com/openshift/api/user/v1/generated.proto new file mode 100644 index 000000000..5b8a2eb12 --- /dev/null +++ b/vendor/github.com/openshift/api/user/v1/generated.proto @@ -0,0 +1,144 @@ + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package github.com.openshift.api.user.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "github.com/openshift/api/user/v1"; + +// Group represents a referenceable set of Users +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message Group { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Users is the list of users in this group. + optional OptionalNames users = 2; +} + +// GroupList is a collection of Groups +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message GroupList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of groups + repeated Group items = 2; +} + +// Identity records a successful authentication of a user with an identity provider. The +// information about the source of authentication is stored on the identity, and the identity +// is then associated with a single user object. Multiple identities can reference a single +// user. Information retrieved from the authentication provider is stored in the extra field +// using a schema determined by the provider. 
+// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message Identity { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // ProviderName is the source of identity information + optional string providerName = 2; + + // ProviderUserName uniquely represents this identity in the scope of the provider + optional string providerUserName = 3; + + // User is a reference to the user this identity is associated with + // Both Name and UID must be set + optional k8s.io.api.core.v1.ObjectReference user = 4; + + // Extra holds extra information about this identity + map<string, string> extra = 5; +} + +// IdentityList is a collection of Identities +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message IdentityList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of identities + repeated Identity items = 2; +} + +// OptionalNames is an array that may also be left nil to distinguish between set and unset. +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message OptionalNames { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// Upon log in, every user of the system receives a User and Identity resource. Administrators +// may directly manipulate the attributes of the users for their own tracking, or set groups +// via the API. The user name is unique and is chosen based on the value provided by the +// identity provider - if a user already exists with the incoming name, the user name may have +// a number appended to it depending on the configuration of the system. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message User { + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // FullName is the full name of user + optional string fullName = 2; + + // Identities are the identities associated with this user + // +optional + repeated string identities = 3; + + // Groups specifies group names this user is a member of. + // This field is deprecated and will be removed in a future release. + // Instead, create a Group object containing the name of this User. + repeated string groups = 4; +} + +// UserIdentityMapping maps a user to an identity +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message UserIdentityMapping { + // metadata is the standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Identity is a reference to an identity + optional k8s.io.api.core.v1.ObjectReference identity = 2; + + // User is a reference to a user + optional k8s.io.api.core.v1.ObjectReference user = 3; +} + +// UserList is a collection of Users +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +message UserList { + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of users + repeated User items = 2; +} + diff --git a/vendor/github.com/openshift/api/user/v1/legacy.go b/vendor/github.com/openshift/api/user/v1/legacy.go new file mode 100644 index 000000000..6817a9f1f --- /dev/null +++ b/vendor/github.com/openshift/api/user/v1/legacy.go @@ -0,0 +1,27 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + legacyGroupVersion = schema.GroupVersion{Group: "", Version: "v1"} + legacySchemeBuilder = runtime.NewSchemeBuilder(addLegacyKnownTypes, corev1.AddToScheme) + DeprecatedInstallWithoutGroup = legacySchemeBuilder.AddToScheme +) + +func addLegacyKnownTypes(scheme *runtime.Scheme) error { + types := []runtime.Object{ + &User{}, + &UserList{}, + &Identity{}, + &IdentityList{}, + &UserIdentityMapping{}, + &Group{}, + &GroupList{}, + } + scheme.AddKnownTypes(legacyGroupVersion, types...) + return nil +} diff --git a/vendor/github.com/openshift/api/user/v1/register.go b/vendor/github.com/openshift/api/user/v1/register.go new file mode 100644 index 000000000..11341d72a --- /dev/null +++ b/vendor/github.com/openshift/api/user/v1/register.go @@ -0,0 +1,44 @@ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var ( + GroupName = "user.openshift.io" + GroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes, corev1.AddToScheme) + // Install is a function which adds this version to a scheme + Install = schemeBuilder.AddToScheme + + // SchemeGroupVersion generated code relies on this name + // Deprecated + SchemeGroupVersion = GroupVersion + // AddToScheme exists solely to keep the old generators creating valid code + // DEPRECATED + AddToScheme = schemeBuilder.AddToScheme +) + +// Resource generated code relies on this being here, but it logically belongs to the group +// DEPRECATED +func Resource(resource string) schema.GroupResource { + return schema.GroupResource{Group: GroupName, Resource: resource} +} + +// Adds the list of known types to api.Scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(GroupVersion, + &User{}, + &UserList{}, + &Identity{}, + &IdentityList{}, + &UserIdentityMapping{}, + &Group{}, + &GroupList{}, + ) + metav1.AddToGroupVersion(scheme, GroupVersion) + return nil +} diff --git a/vendor/github.com/openshift/api/user/v1/types.go b/vendor/github.com/openshift/api/user/v1/types.go new file mode 100644 index 000000000..7014bbfac --- /dev/null +++ b/vendor/github.com/openshift/api/user/v1/types.go @@ -0,0 +1,174 @@ +package v1 + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Upon log in, every user of the system receives a User and Identity resource. Administrators +// may directly manipulate the attributes of the users for their own tracking, or set groups +// via the API. The user name is unique and is chosen based on the value provided by the +// identity provider - if a user already exists with the incoming name, the user name may have +// a number appended to it depending on the configuration of the system. +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type User struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // FullName is the full name of user + FullName string `json:"fullName,omitempty" protobuf:"bytes,2,opt,name=fullName"` + + // Identities are the identities associated with this user + // +optional + Identities []string `json:"identities,omitempty" protobuf:"bytes,3,rep,name=identities"` + + // Groups specifies group names this user is a member of. + // This field is deprecated and will be removed in a future release. + // Instead, create a Group object containing the name of this User. + Groups []string `json:"groups" protobuf:"bytes,4,rep,name=groups"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// UserList is a collection of Users +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type UserList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of users + Items []User `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Identity records a successful authentication of a user with an identity provider. The +// information about the source of authentication is stored on the identity, and the identity +// is then associated with a single user object. Multiple identities can reference a single +// user. Information retrieved from the authentication provider is stored in the extra field +// using a schema determined by the provider. 
+// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type Identity struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // ProviderName is the source of identity information + ProviderName string `json:"providerName" protobuf:"bytes,2,opt,name=providerName"` + + // ProviderUserName uniquely represents this identity in the scope of the provider + ProviderUserName string `json:"providerUserName" protobuf:"bytes,3,opt,name=providerUserName"` + + // User is a reference to the user this identity is associated with + // Both Name and UID must be set + User corev1.ObjectReference `json:"user" protobuf:"bytes,4,opt,name=user"` + + // Extra holds extra information about this identity + Extra map[string]string `json:"extra,omitempty" protobuf:"bytes,5,rep,name=extra"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// IdentityList is a collection of Identities +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type IdentityList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of identities + Items []Identity `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +genclient:onlyVerbs=get,create,update,delete +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// UserIdentityMapping maps a user to an identity +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type UserIdentityMapping struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Identity is a reference to an identity + Identity corev1.ObjectReference `json:"identity,omitempty" protobuf:"bytes,2,opt,name=identity"` + // User is a reference to a user + User corev1.ObjectReference `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` +} + +// OptionalNames is an array that may also be left nil to distinguish between set and unset. +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type OptionalNames []string + +func (t OptionalNames) String() string { + return fmt.Sprintf("%v", []string(t)) +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Group represents a referenceable set of Users +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). 
+// +openshift:compatibility-gen:level=1 +type Group struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Users is the list of users in this group. + Users OptionalNames `json:"users" protobuf:"bytes,2,rep,name=users"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// GroupList is a collection of Groups +// +// Compatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer). +// +openshift:compatibility-gen:level=1 +type GroupList struct { + metav1.TypeMeta `json:",inline"` + + // metadata is the standard list's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of groups + Items []Group `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/github.com/openshift/api/user/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/api/user/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..e6b2fb867 --- /dev/null +++ b/vendor/github.com/openshift/api/user/v1/zz_generated.deepcopy.go @@ -0,0 +1,258 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Group) DeepCopyInto(out *Group) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make(OptionalNames, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Group. +func (in *Group) DeepCopy() *Group { + if in == nil { + return nil + } + out := new(Group) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Group) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupList) DeepCopyInto(out *GroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Group, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupList. +func (in *GroupList) DeepCopy() *GroupList { + if in == nil { + return nil + } + out := new(GroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
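
The nil-versus-empty distinction that the OptionalNames comment above describes is easiest to see in the wire format; a small sketch, marshaling just the field:

package main

import (
	"encoding/json"
	"fmt"

	userv1 "github.com/openshift/api/user/v1"
)

func main() {
	// The users field carries no omitempty, so a nil slice survives
	// serialization as null ("unset"), while an explicitly empty
	// membership list serializes as [].
	unset := userv1.Group{}
	empty := userv1.Group{Users: userv1.OptionalNames{}}

	a, _ := json.Marshal(unset.Users)
	b, _ := json.Marshal(empty.Users)
	fmt.Println(string(a)) // null
	fmt.Println(string(b)) // []
}
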
+func (in *Identity) DeepCopyInto(out *Identity) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.User = in.User + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Identity. +func (in *Identity) DeepCopy() *Identity { + if in == nil { + return nil + } + out := new(Identity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Identity) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IdentityList) DeepCopyInto(out *IdentityList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Identity, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IdentityList. +func (in *IdentityList) DeepCopy() *IdentityList { + if in == nil { + return nil + } + out := new(IdentityList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IdentityList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in OptionalNames) DeepCopyInto(out *OptionalNames) { + { + in := &in + *out = make(OptionalNames, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalNames. +func (in OptionalNames) DeepCopy() OptionalNames { + if in == nil { + return nil + } + out := new(OptionalNames) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *User) DeepCopyInto(out *User) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Identities != nil { + in, out := &in.Identities, &out.Identities + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new User. +func (in *User) DeepCopy() *User { + if in == nil { + return nil + } + out := new(User) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *User) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
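
The generated deep-copy functions exist so that objects taken from shared caches can be mutated safely; a quick sketch of the aliasing guarantee, with illustrative values:

package main

import (
	"fmt"

	userv1 "github.com/openshift/api/user/v1"
)

func main() {
	orig := &userv1.User{Identities: []string{"htpasswd:alice"}}

	// DeepCopy allocates fresh backing arrays, so mutating the copy
	// leaves the original untouched.
	cp := orig.DeepCopy()
	cp.Identities[0] = "ldap:alice"

	fmt.Println(orig.Identities[0]) // htpasswd:alice
	fmt.Println(cp.Identities[0])   // ldap:alice
}
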
+func (in *UserIdentityMapping) DeepCopyInto(out *UserIdentityMapping) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Identity = in.Identity + out.User = in.User + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserIdentityMapping. +func (in *UserIdentityMapping) DeepCopy() *UserIdentityMapping { + if in == nil { + return nil + } + out := new(UserIdentityMapping) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UserIdentityMapping) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserList) DeepCopyInto(out *UserList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]User, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserList. +func (in *UserList) DeepCopy() *UserList { + if in == nil { + return nil + } + out := new(UserList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UserList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/github.com/openshift/api/user/v1/zz_generated.swagger_doc_generated.go b/vendor/github.com/openshift/api/user/v1/zz_generated.swagger_doc_generated.go new file mode 100644 index 000000000..5844723a7 --- /dev/null +++ b/vendor/github.com/openshift/api/user/v1/zz_generated.swagger_doc_generated.go @@ -0,0 +1,90 @@ +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Group = map[string]string{ + "": "Group represents a referenceable set of Users\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "users": "Users is the list of users in this group.", +} + +func (Group) SwaggerDoc() map[string]string { + return map_Group +} + +var map_GroupList = map[string]string{ + "": "GroupList is a collection of Groups\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is the list of groups", +} + +func (GroupList) SwaggerDoc() map[string]string { + return map_GroupList +} + +var map_Identity = map[string]string{ + "": "Identity records a successful authentication of a user with an identity provider. The information about the source of authentication is stored on the identity, and the identity is then associated with a single user object. Multiple identities can reference a single user. Information retrieved from the authentication provider is stored in the extra field using a schema determined by the provider.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "providerName": "ProviderName is the source of identity information", + "providerUserName": "ProviderUserName uniquely represents this identity in the scope of the provider", + "user": "User is a reference to the user this identity is associated with Both Name and UID must be set", + "extra": "Extra holds extra information about this identity", +} + +func (Identity) SwaggerDoc() map[string]string { + return map_Identity +} + +var map_IdentityList = map[string]string{ + "": "IdentityList is a collection of Identities\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is the list of identities", +} + +func (IdentityList) SwaggerDoc() map[string]string { + return map_IdentityList +} + +var map_User = map[string]string{ + "": "Upon log in, every user of the system receives a User and Identity resource. Administrators may directly manipulate the attributes of the users for their own tracking, or set groups via the API. The user name is unique and is chosen based on the value provided by the identity provider - if a user already exists with the incoming name, the user name may have a number appended to it depending on the configuration of the system.\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "fullName": "FullName is the full name of user", + "identities": "Identities are the identities associated with this user", + "groups": "Groups specifies group names this user is a member of. This field is deprecated and will be removed in a future release. Instead, create a Group object containing the name of this User.", +} + +func (User) SwaggerDoc() map[string]string { + return map_User +} + +var map_UserIdentityMapping = map[string]string{ + "": "UserIdentityMapping maps a user to an identity\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "identity": "Identity is a reference to an identity", + "user": "User is a reference to a user", +} + +func (UserIdentityMapping) SwaggerDoc() map[string]string { + return map_UserIdentityMapping +} + +var map_UserList = map[string]string{ + "": "UserList is a collection of Users\n\nCompatibility level 1: Stable within a major release for a minimum of 12 months or 3 minor releases (whichever is longer).", + "metadata": "metadata is the standard list's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is the list of users", +} + +func (UserList) SwaggerDoc() map[string]string { + return map_UserList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/binarybuildsource.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/binarybuildsource.go new file mode 100644 index 000000000..c3f017207 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/binarybuildsource.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// BinaryBuildSourceApplyConfiguration represents an declarative configuration of the BinaryBuildSource type for use +// with apply. +type BinaryBuildSourceApplyConfiguration struct { + AsFile *string `json:"asFile,omitempty"` +} + +// BinaryBuildSourceApplyConfiguration constructs an declarative configuration of the BinaryBuildSource type for use with +// apply. +func BinaryBuildSource() *BinaryBuildSourceApplyConfiguration { + return &BinaryBuildSourceApplyConfiguration{} +} + +// WithAsFile sets the AsFile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AsFile field is set to the value of the last call. +func (b *BinaryBuildSourceApplyConfiguration) WithAsFile(value string) *BinaryBuildSourceApplyConfiguration { + b.AsFile = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/bitbucketwebhookcause.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/bitbucketwebhookcause.go new file mode 100644 index 000000000..e6300d6db --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/bitbucketwebhookcause.go @@ -0,0 +1,31 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// BitbucketWebHookCauseApplyConfiguration represents an declarative configuration of the BitbucketWebHookCause type for use +// with apply. +type BitbucketWebHookCauseApplyConfiguration struct { + CommonWebHookCauseApplyConfiguration `json:",inline"` +} + +// BitbucketWebHookCauseApplyConfiguration constructs an declarative configuration of the BitbucketWebHookCause type for use with +// apply. +func BitbucketWebHookCause() *BitbucketWebHookCauseApplyConfiguration { + return &BitbucketWebHookCauseApplyConfiguration{} +} + +// WithRevision sets the Revision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Revision field is set to the value of the last call. 
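
The SwaggerDoc maps above are exposed through a method on each type; a minimal sketch of reading them back:

package main

import (
	"fmt"

	userv1 "github.com/openshift/api/user/v1"
)

func main() {
	// The empty-string key holds the type-level description; the remaining
	// keys are per-field docs keyed by JSON field name.
	docs := (userv1.User{}).SwaggerDoc()
	fmt.Println(docs[""])
	fmt.Println(docs["groups"])
}
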
+func (b *BitbucketWebHookCauseApplyConfiguration) WithRevision(value *SourceRevisionApplyConfiguration) *BitbucketWebHookCauseApplyConfiguration { + b.Revision = value + return b +} + +// WithSecret sets the Secret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Secret field is set to the value of the last call. +func (b *BitbucketWebHookCauseApplyConfiguration) WithSecret(value string) *BitbucketWebHookCauseApplyConfiguration { + b.Secret = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/build.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/build.go new file mode 100644 index 000000000..7a20d67ca --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/build.go @@ -0,0 +1,242 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + apibuildv1 "github.com/openshift/api/build/v1" + internal "github.com/openshift/client-go/build/applyconfigurations/internal" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// BuildApplyConfiguration represents an declarative configuration of the Build type for use +// with apply. +type BuildApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *BuildSpecApplyConfiguration `json:"spec,omitempty"` + Status *BuildStatusApplyConfiguration `json:"status,omitempty"` +} + +// Build constructs an declarative configuration of the Build type for use with +// apply. +func Build(name, namespace string) *BuildApplyConfiguration { + b := &BuildApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("Build") + b.WithAPIVersion("build.openshift.io/v1") + return b +} + +// ExtractBuild extracts the applied configuration owned by fieldManager from +// build. If no managedFields are found in build for fieldManager, a +// BuildApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// build must be a unmodified Build API object that was retrieved from the Kubernetes API. +// ExtractBuild provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractBuild(build *apibuildv1.Build, fieldManager string) (*BuildApplyConfiguration, error) { + return extractBuild(build, fieldManager, "") +} + +// ExtractBuildStatus is the same as ExtractBuild except +// that it extracts the status subresource applied configuration. +// Experimental! 
+func ExtractBuildStatus(build *apibuildv1.Build, fieldManager string) (*BuildApplyConfiguration, error) { + return extractBuild(build, fieldManager, "status") +} + +func extractBuild(build *apibuildv1.Build, fieldManager string, subresource string) (*BuildApplyConfiguration, error) { + b := &BuildApplyConfiguration{} + err := managedfields.ExtractInto(build, internal.Parser().Type("com.github.openshift.api.build.v1.Build"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(build.Name) + b.WithNamespace(build.Namespace) + + b.WithKind("Build") + b.WithAPIVersion("build.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *BuildApplyConfiguration) WithKind(value string) *BuildApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *BuildApplyConfiguration) WithAPIVersion(value string) *BuildApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *BuildApplyConfiguration) WithName(value string) *BuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *BuildApplyConfiguration) WithGenerateName(value string) *BuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *BuildApplyConfiguration) WithNamespace(value string) *BuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *BuildApplyConfiguration) WithUID(value types.UID) *BuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. 
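
A sketch of the extract/modify/apply workflow that the ExtractBuild comment describes. Only the apply configurations in this diff are taken as given; the typed clientset calls (NewForConfig, BuildV1().Builds().Get/Apply) are assumed from the standard generated-client pattern, and all names are placeholders:

package example

import (
	"context"

	buildv1apply "github.com/openshift/client-go/build/applyconfigurations/build/v1"
	buildclient "github.com/openshift/client-go/build/clientset/versioned" // assumed path
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
)

func relabel(cfg *rest.Config) error {
	cs, err := buildclient.NewForConfig(cfg)
	if err != nil {
		return err
	}
	ctx := context.Background()

	live, err := cs.BuildV1().Builds("demo").Get(ctx, "my-build-1", metav1.GetOptions{})
	if err != nil {
		return err
	}

	// Recover only the fields this field manager owns, mutate them, and
	// re-apply; fields owned by other managers are left untouched.
	ac, err := buildv1apply.ExtractBuild(live, "my-controller")
	if err != nil {
		return err
	}
	ac.WithLabels(map[string]string{"stage": "retry"})

	_, err = cs.BuildV1().Builds("demo").Apply(ctx, ac, // Apply method assumed
		metav1.ApplyOptions{FieldManager: "my-controller", Force: true})
	return err
}
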
+func (b *BuildApplyConfiguration) WithResourceVersion(value string) *BuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *BuildApplyConfiguration) WithGeneration(value int64) *BuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *BuildApplyConfiguration) WithCreationTimestamp(value metav1.Time) *BuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *BuildApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *BuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *BuildApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *BuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *BuildApplyConfiguration) WithLabels(entries map[string]string) *BuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. 
+func (b *BuildApplyConfiguration) WithAnnotations(entries map[string]string) *BuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *BuildApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *BuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *BuildApplyConfiguration) WithFinalizers(values ...string) *BuildApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *BuildApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *BuildApplyConfiguration) WithSpec(value *BuildSpecApplyConfiguration) *BuildApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *BuildApplyConfiguration) WithStatus(value *BuildStatusApplyConfiguration) *BuildApplyConfiguration { + b.Status = value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildcondition.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildcondition.go new file mode 100644 index 000000000..707dd8cc9 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildcondition.go @@ -0,0 +1,74 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/build/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// BuildConditionApplyConfiguration represents an declarative configuration of the BuildCondition type for use +// with apply. 
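
Because every field in the apply configuration is a pointer behind omitempty, only what was explicitly set is serialized; a sketch with illustrative names:

package main

import (
	"encoding/json"
	"fmt"

	buildv1apply "github.com/openshift/client-go/build/applyconfigurations/build/v1"
)

func main() {
	b := buildv1apply.Build("my-build-1", "demo").
		WithLabels(map[string]string{"app": "demo"})

	// Only apiVersion, kind, and the touched metadata fields appear in the
	// output, which is what keeps a server-side apply patch from claiming
	// ownership of fields the caller never set.
	out, err := json.MarshalIndent(b, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
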
+type BuildConditionApplyConfiguration struct { + Type *v1.BuildConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + LastUpdateTime *metav1.Time `json:"lastUpdateTime,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` +} + +// BuildConditionApplyConfiguration constructs an declarative configuration of the BuildCondition type for use with +// apply. +func BuildCondition() *BuildConditionApplyConfiguration { + return &BuildConditionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *BuildConditionApplyConfiguration) WithType(value v1.BuildConditionType) *BuildConditionApplyConfiguration { + b.Type = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *BuildConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *BuildConditionApplyConfiguration { + b.Status = &value + return b +} + +// WithLastUpdateTime sets the LastUpdateTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastUpdateTime field is set to the value of the last call. +func (b *BuildConditionApplyConfiguration) WithLastUpdateTime(value metav1.Time) *BuildConditionApplyConfiguration { + b.LastUpdateTime = &value + return b +} + +// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastTransitionTime field is set to the value of the last call. +func (b *BuildConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *BuildConditionApplyConfiguration { + b.LastTransitionTime = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *BuildConditionApplyConfiguration) WithReason(value string) *BuildConditionApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. 
+func (b *BuildConditionApplyConfiguration) WithMessage(value string) *BuildConditionApplyConfiguration { + b.Message = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildconfig.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildconfig.go new file mode 100644 index 000000000..982bbefa5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildconfig.go @@ -0,0 +1,242 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + apibuildv1 "github.com/openshift/api/build/v1" + internal "github.com/openshift/client-go/build/applyconfigurations/internal" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// BuildConfigApplyConfiguration represents an declarative configuration of the BuildConfig type for use +// with apply. +type BuildConfigApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *BuildConfigSpecApplyConfiguration `json:"spec,omitempty"` + Status *BuildConfigStatusApplyConfiguration `json:"status,omitempty"` +} + +// BuildConfig constructs an declarative configuration of the BuildConfig type for use with +// apply. +func BuildConfig(name, namespace string) *BuildConfigApplyConfiguration { + b := &BuildConfigApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("BuildConfig") + b.WithAPIVersion("build.openshift.io/v1") + return b +} + +// ExtractBuildConfig extracts the applied configuration owned by fieldManager from +// buildConfig. If no managedFields are found in buildConfig for fieldManager, a +// BuildConfigApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// buildConfig must be a unmodified BuildConfig API object that was retrieved from the Kubernetes API. +// ExtractBuildConfig provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractBuildConfig(buildConfig *apibuildv1.BuildConfig, fieldManager string) (*BuildConfigApplyConfiguration, error) { + return extractBuildConfig(buildConfig, fieldManager, "") +} + +// ExtractBuildConfigStatus is the same as ExtractBuildConfig except +// that it extracts the status subresource applied configuration. +// Experimental! 
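
The condition builder follows the same chaining pattern; a sketch, where the condition type string "Complete" is only an illustrative value cast to the typed string:

package main

import (
	"fmt"

	buildv1 "github.com/openshift/api/build/v1"
	buildv1apply "github.com/openshift/client-go/build/applyconfigurations/build/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	cond := buildv1apply.BuildCondition().
		WithType(buildv1.BuildConditionType("Complete")). // illustrative type
		WithStatus(corev1.ConditionTrue).
		WithLastTransitionTime(metav1.Now()).
		WithReason("BuildCompleted").
		WithMessage("all stages finished")

	fmt.Println(*cond.Type, *cond.Status)
}
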
+func ExtractBuildConfigStatus(buildConfig *apibuildv1.BuildConfig, fieldManager string) (*BuildConfigApplyConfiguration, error) { + return extractBuildConfig(buildConfig, fieldManager, "status") +} + +func extractBuildConfig(buildConfig *apibuildv1.BuildConfig, fieldManager string, subresource string) (*BuildConfigApplyConfiguration, error) { + b := &BuildConfigApplyConfiguration{} + err := managedfields.ExtractInto(buildConfig, internal.Parser().Type("com.github.openshift.api.build.v1.BuildConfig"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(buildConfig.Name) + b.WithNamespace(buildConfig.Namespace) + + b.WithKind("BuildConfig") + b.WithAPIVersion("build.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *BuildConfigApplyConfiguration) WithKind(value string) *BuildConfigApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *BuildConfigApplyConfiguration) WithAPIVersion(value string) *BuildConfigApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *BuildConfigApplyConfiguration) WithName(value string) *BuildConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *BuildConfigApplyConfiguration) WithGenerateName(value string) *BuildConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *BuildConfigApplyConfiguration) WithNamespace(value string) *BuildConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. 
+func (b *BuildConfigApplyConfiguration) WithUID(value types.UID) *BuildConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *BuildConfigApplyConfiguration) WithResourceVersion(value string) *BuildConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *BuildConfigApplyConfiguration) WithGeneration(value int64) *BuildConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *BuildConfigApplyConfiguration) WithCreationTimestamp(value metav1.Time) *BuildConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *BuildConfigApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *BuildConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *BuildConfigApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *BuildConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *BuildConfigApplyConfiguration) WithLabels(entries map[string]string) *BuildConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *BuildConfigApplyConfiguration) WithAnnotations(entries map[string]string) *BuildConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *BuildConfigApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *BuildConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *BuildConfigApplyConfiguration) WithFinalizers(values ...string) *BuildConfigApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *BuildConfigApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *BuildConfigApplyConfiguration) WithSpec(value *BuildConfigSpecApplyConfiguration) *BuildConfigApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. 
+func (b *BuildConfigApplyConfiguration) WithStatus(value *BuildConfigStatusApplyConfiguration) *BuildConfigApplyConfiguration { + b.Status = value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildconfigspec.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildconfigspec.go new file mode 100644 index 000000000..8f64b7dd7 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildconfigspec.go @@ -0,0 +1,141 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + buildv1 "github.com/openshift/api/build/v1" + corev1 "k8s.io/api/core/v1" +) + +// BuildConfigSpecApplyConfiguration represents an declarative configuration of the BuildConfigSpec type for use +// with apply. +type BuildConfigSpecApplyConfiguration struct { + Triggers []BuildTriggerPolicyApplyConfiguration `json:"triggers,omitempty"` + RunPolicy *buildv1.BuildRunPolicy `json:"runPolicy,omitempty"` + CommonSpecApplyConfiguration `json:",inline"` + SuccessfulBuildsHistoryLimit *int32 `json:"successfulBuildsHistoryLimit,omitempty"` + FailedBuildsHistoryLimit *int32 `json:"failedBuildsHistoryLimit,omitempty"` +} + +// BuildConfigSpecApplyConfiguration constructs an declarative configuration of the BuildConfigSpec type for use with +// apply. +func BuildConfigSpec() *BuildConfigSpecApplyConfiguration { + return &BuildConfigSpecApplyConfiguration{} +} + +// WithTriggers adds the given value to the Triggers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Triggers field. +func (b *BuildConfigSpecApplyConfiguration) WithTriggers(values ...*BuildTriggerPolicyApplyConfiguration) *BuildConfigSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithTriggers") + } + b.Triggers = append(b.Triggers, *values[i]) + } + return b +} + +// WithRunPolicy sets the RunPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RunPolicy field is set to the value of the last call. +func (b *BuildConfigSpecApplyConfiguration) WithRunPolicy(value buildv1.BuildRunPolicy) *BuildConfigSpecApplyConfiguration { + b.RunPolicy = &value + return b +} + +// WithServiceAccount sets the ServiceAccount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ServiceAccount field is set to the value of the last call. +func (b *BuildConfigSpecApplyConfiguration) WithServiceAccount(value string) *BuildConfigSpecApplyConfiguration { + b.ServiceAccount = &value + return b +} + +// WithSource sets the Source field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Source field is set to the value of the last call. 
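
Nested apply configurations compose with the same zero-value-constructor-plus-setters pattern; a sketch using spec setters defined later in this diff (BuildRunPolicySerial comes from the vendored openshift/api constants; names are illustrative):

package main

import (
	"fmt"

	buildv1 "github.com/openshift/api/build/v1"
	buildv1apply "github.com/openshift/client-go/build/applyconfigurations/build/v1"
)

func main() {
	bc := buildv1apply.BuildConfig("nightly", "demo").
		WithSpec(buildv1apply.BuildConfigSpec().
			WithRunPolicy(buildv1.BuildRunPolicySerial).
			WithSuccessfulBuildsHistoryLimit(5).
			WithFailedBuildsHistoryLimit(2))

	fmt.Println(*bc.Spec.RunPolicy, *bc.Spec.SuccessfulBuildsHistoryLimit)
}
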
+func (b *BuildConfigSpecApplyConfiguration) WithSource(value *BuildSourceApplyConfiguration) *BuildConfigSpecApplyConfiguration { + b.Source = value + return b +} + +// WithRevision sets the Revision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Revision field is set to the value of the last call. +func (b *BuildConfigSpecApplyConfiguration) WithRevision(value *SourceRevisionApplyConfiguration) *BuildConfigSpecApplyConfiguration { + b.Revision = value + return b +} + +// WithStrategy sets the Strategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Strategy field is set to the value of the last call. +func (b *BuildConfigSpecApplyConfiguration) WithStrategy(value *BuildStrategyApplyConfiguration) *BuildConfigSpecApplyConfiguration { + b.Strategy = value + return b +} + +// WithOutput sets the Output field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Output field is set to the value of the last call. +func (b *BuildConfigSpecApplyConfiguration) WithOutput(value *BuildOutputApplyConfiguration) *BuildConfigSpecApplyConfiguration { + b.Output = value + return b +} + +// WithResources sets the Resources field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resources field is set to the value of the last call. +func (b *BuildConfigSpecApplyConfiguration) WithResources(value corev1.ResourceRequirements) *BuildConfigSpecApplyConfiguration { + b.Resources = &value + return b +} + +// WithPostCommit sets the PostCommit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PostCommit field is set to the value of the last call. +func (b *BuildConfigSpecApplyConfiguration) WithPostCommit(value *BuildPostCommitSpecApplyConfiguration) *BuildConfigSpecApplyConfiguration { + b.PostCommit = value + return b +} + +// WithCompletionDeadlineSeconds sets the CompletionDeadlineSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CompletionDeadlineSeconds field is set to the value of the last call. +func (b *BuildConfigSpecApplyConfiguration) WithCompletionDeadlineSeconds(value int64) *BuildConfigSpecApplyConfiguration { + b.CompletionDeadlineSeconds = &value + return b +} + +// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeSelector field is set to the value of the last call. 
+func (b *BuildConfigSpecApplyConfiguration) WithNodeSelector(value buildv1.OptionalNodeSelector) *BuildConfigSpecApplyConfiguration { + b.NodeSelector = &value + return b +} + +// WithMountTrustedCA sets the MountTrustedCA field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MountTrustedCA field is set to the value of the last call. +func (b *BuildConfigSpecApplyConfiguration) WithMountTrustedCA(value bool) *BuildConfigSpecApplyConfiguration { + b.MountTrustedCA = &value + return b +} + +// WithSuccessfulBuildsHistoryLimit sets the SuccessfulBuildsHistoryLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SuccessfulBuildsHistoryLimit field is set to the value of the last call. +func (b *BuildConfigSpecApplyConfiguration) WithSuccessfulBuildsHistoryLimit(value int32) *BuildConfigSpecApplyConfiguration { + b.SuccessfulBuildsHistoryLimit = &value + return b +} + +// WithFailedBuildsHistoryLimit sets the FailedBuildsHistoryLimit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FailedBuildsHistoryLimit field is set to the value of the last call. +func (b *BuildConfigSpecApplyConfiguration) WithFailedBuildsHistoryLimit(value int32) *BuildConfigSpecApplyConfiguration { + b.FailedBuildsHistoryLimit = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildconfigstatus.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildconfigstatus.go new file mode 100644 index 000000000..b4f11305a --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildconfigstatus.go @@ -0,0 +1,37 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// BuildConfigStatusApplyConfiguration represents an declarative configuration of the BuildConfigStatus type for use +// with apply. +type BuildConfigStatusApplyConfiguration struct { + LastVersion *int64 `json:"lastVersion,omitempty"` + ImageChangeTriggers []ImageChangeTriggerStatusApplyConfiguration `json:"imageChangeTriggers,omitempty"` +} + +// BuildConfigStatusApplyConfiguration constructs an declarative configuration of the BuildConfigStatus type for use with +// apply. +func BuildConfigStatus() *BuildConfigStatusApplyConfiguration { + return &BuildConfigStatusApplyConfiguration{} +} + +// WithLastVersion sets the LastVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastVersion field is set to the value of the last call. +func (b *BuildConfigStatusApplyConfiguration) WithLastVersion(value int64) *BuildConfigStatusApplyConfiguration { + b.LastVersion = &value + return b +} + +// WithImageChangeTriggers adds the given value to the ImageChangeTriggers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ImageChangeTriggers field. 
+func (b *BuildConfigStatusApplyConfiguration) WithImageChangeTriggers(values ...*ImageChangeTriggerStatusApplyConfiguration) *BuildConfigStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithImageChangeTriggers")
+		}
+		b.ImageChangeTriggers = append(b.ImageChangeTriggers, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildoutput.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildoutput.go
new file mode 100644
index 000000000..414ece741
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildoutput.go
@@ -0,0 +1,50 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	v1 "k8s.io/api/core/v1"
+)
+
+// BuildOutputApplyConfiguration represents an declarative configuration of the BuildOutput type for use
+// with apply.
+type BuildOutputApplyConfiguration struct {
+	To          *v1.ObjectReference            `json:"to,omitempty"`
+	PushSecret  *v1.LocalObjectReference       `json:"pushSecret,omitempty"`
+	ImageLabels []ImageLabelApplyConfiguration `json:"imageLabels,omitempty"`
+}
+
+// BuildOutputApplyConfiguration constructs an declarative configuration of the BuildOutput type for use with
+// apply.
+func BuildOutput() *BuildOutputApplyConfiguration {
+	return &BuildOutputApplyConfiguration{}
+}
+
+// WithTo sets the To field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the To field is set to the value of the last call.
+func (b *BuildOutputApplyConfiguration) WithTo(value v1.ObjectReference) *BuildOutputApplyConfiguration {
+	b.To = &value
+	return b
+}
+
+// WithPushSecret sets the PushSecret field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PushSecret field is set to the value of the last call.
+func (b *BuildOutputApplyConfiguration) WithPushSecret(value v1.LocalObjectReference) *BuildOutputApplyConfiguration {
+	b.PushSecret = &value
+	return b
+}
+
+// WithImageLabels adds the given value to the ImageLabels field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ImageLabels field.
+func (b *BuildOutputApplyConfiguration) WithImageLabels(values ...*ImageLabelApplyConfiguration) *BuildOutputApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithImageLabels")
+		}
+		b.ImageLabels = append(b.ImageLabels, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildpostcommitspec.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildpostcommitspec.go
new file mode 100644
index 000000000..adbc8e529
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildpostcommitspec.go
@@ -0,0 +1,45 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// BuildPostCommitSpecApplyConfiguration represents an declarative configuration of the BuildPostCommitSpec type for use
+// with apply.
+type BuildPostCommitSpecApplyConfiguration struct {
+	Command []string `json:"command,omitempty"`
+	Args    []string `json:"args,omitempty"`
+	Script  *string  `json:"script,omitempty"`
+}
+
+// BuildPostCommitSpecApplyConfiguration constructs an declarative configuration of the BuildPostCommitSpec type for use with
+// apply.
+func BuildPostCommitSpec() *BuildPostCommitSpecApplyConfiguration {
+	return &BuildPostCommitSpecApplyConfiguration{}
+}
+
+// WithCommand adds the given value to the Command field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Command field.
+func (b *BuildPostCommitSpecApplyConfiguration) WithCommand(values ...string) *BuildPostCommitSpecApplyConfiguration {
+	for i := range values {
+		b.Command = append(b.Command, values[i])
+	}
+	return b
+}
+
+// WithArgs adds the given value to the Args field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Args field.
+func (b *BuildPostCommitSpecApplyConfiguration) WithArgs(values ...string) *BuildPostCommitSpecApplyConfiguration {
+	for i := range values {
+		b.Args = append(b.Args, values[i])
+	}
+	return b
+}
+
+// WithScript sets the Script field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Script field is set to the value of the last call.
+func (b *BuildPostCommitSpecApplyConfiguration) WithScript(value string) *BuildPostCommitSpecApplyConfiguration {
+	b.Script = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildsource.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildsource.go
new file mode 100644
index 000000000..3f788b3b1
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildsource.go
@@ -0,0 +1,115 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	v1 "github.com/openshift/api/build/v1"
+	corev1 "k8s.io/api/core/v1"
+)
+
+// BuildSourceApplyConfiguration represents an declarative configuration of the BuildSource type for use
+// with apply.
+type BuildSourceApplyConfiguration struct {
+	Type         *v1.BuildSourceType                      `json:"type,omitempty"`
+	Binary       *BinaryBuildSourceApplyConfiguration     `json:"binary,omitempty"`
+	Dockerfile   *string                                  `json:"dockerfile,omitempty"`
+	Git          *GitBuildSourceApplyConfiguration        `json:"git,omitempty"`
+	Images       []ImageSourceApplyConfiguration          `json:"images,omitempty"`
+	ContextDir   *string                                  `json:"contextDir,omitempty"`
+	SourceSecret *corev1.LocalObjectReference             `json:"sourceSecret,omitempty"`
+	Secrets      []SecretBuildSourceApplyConfiguration    `json:"secrets,omitempty"`
+	ConfigMaps   []ConfigMapBuildSourceApplyConfiguration `json:"configMaps,omitempty"`
+}
+
+// BuildSourceApplyConfiguration constructs an declarative configuration of the BuildSource type for use with
+// apply.
+func BuildSource() *BuildSourceApplyConfiguration {
+	return &BuildSourceApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *BuildSourceApplyConfiguration) WithType(value v1.BuildSourceType) *BuildSourceApplyConfiguration {
+	b.Type = &value
+	return b
+}
+
+// WithBinary sets the Binary field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Binary field is set to the value of the last call.
+func (b *BuildSourceApplyConfiguration) WithBinary(value *BinaryBuildSourceApplyConfiguration) *BuildSourceApplyConfiguration {
+	b.Binary = value
+	return b
+}
+
+// WithDockerfile sets the Dockerfile field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Dockerfile field is set to the value of the last call.
+func (b *BuildSourceApplyConfiguration) WithDockerfile(value string) *BuildSourceApplyConfiguration {
+	b.Dockerfile = &value
+	return b
+}
+
+// WithGit sets the Git field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Git field is set to the value of the last call.
+func (b *BuildSourceApplyConfiguration) WithGit(value *GitBuildSourceApplyConfiguration) *BuildSourceApplyConfiguration {
+	b.Git = value
+	return b
+}
+
+// WithImages adds the given value to the Images field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Images field.
+func (b *BuildSourceApplyConfiguration) WithImages(values ...*ImageSourceApplyConfiguration) *BuildSourceApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithImages")
+		}
+		b.Images = append(b.Images, *values[i])
+	}
+	return b
+}
+
+// WithContextDir sets the ContextDir field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ContextDir field is set to the value of the last call.
+func (b *BuildSourceApplyConfiguration) WithContextDir(value string) *BuildSourceApplyConfiguration {
+	b.ContextDir = &value
+	return b
+}
+
+// WithSourceSecret sets the SourceSecret field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the SourceSecret field is set to the value of the last call.
+func (b *BuildSourceApplyConfiguration) WithSourceSecret(value corev1.LocalObjectReference) *BuildSourceApplyConfiguration {
+	b.SourceSecret = &value
+	return b
+}
+
+// WithSecrets adds the given value to the Secrets field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Secrets field.
+func (b *BuildSourceApplyConfiguration) WithSecrets(values ...*SecretBuildSourceApplyConfiguration) *BuildSourceApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithSecrets")
+		}
+		b.Secrets = append(b.Secrets, *values[i])
+	}
+	return b
+}
+
+// WithConfigMaps adds the given value to the ConfigMaps field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the ConfigMaps field.
+func (b *BuildSourceApplyConfiguration) WithConfigMaps(values ...*ConfigMapBuildSourceApplyConfiguration) *BuildSourceApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithConfigMaps")
+		}
+		b.ConfigMaps = append(b.ConfigMaps, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildspec.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildspec.go
new file mode 100644
index 000000000..3567c5107
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildspec.go
@@ -0,0 +1,114 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	buildv1 "github.com/openshift/api/build/v1"
+	corev1 "k8s.io/api/core/v1"
+)
+
+// BuildSpecApplyConfiguration represents an declarative configuration of the BuildSpec type for use
+// with apply.
+type BuildSpecApplyConfiguration struct {
+	CommonSpecApplyConfiguration `json:",inline"`
+	TriggeredBy                  []BuildTriggerCauseApplyConfiguration `json:"triggeredBy,omitempty"`
+}
+
+// BuildSpecApplyConfiguration constructs an declarative configuration of the BuildSpec type for use with
+// apply.
+func BuildSpec() *BuildSpecApplyConfiguration {
+	return &BuildSpecApplyConfiguration{}
+}
+
+// WithServiceAccount sets the ServiceAccount field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ServiceAccount field is set to the value of the last call.
+func (b *BuildSpecApplyConfiguration) WithServiceAccount(value string) *BuildSpecApplyConfiguration {
+	b.ServiceAccount = &value
+	return b
+}
+
+// WithSource sets the Source field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Source field is set to the value of the last call.
+func (b *BuildSpecApplyConfiguration) WithSource(value *BuildSourceApplyConfiguration) *BuildSpecApplyConfiguration {
+	b.Source = value
+	return b
+}
+
+// WithRevision sets the Revision field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Revision field is set to the value of the last call.
+func (b *BuildSpecApplyConfiguration) WithRevision(value *SourceRevisionApplyConfiguration) *BuildSpecApplyConfiguration {
+	b.Revision = value
+	return b
+}
+
+// WithStrategy sets the Strategy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Strategy field is set to the value of the last call.
+func (b *BuildSpecApplyConfiguration) WithStrategy(value *BuildStrategyApplyConfiguration) *BuildSpecApplyConfiguration {
+	b.Strategy = value
+	return b
+}
+
+// WithOutput sets the Output field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Output field is set to the value of the last call.
+func (b *BuildSpecApplyConfiguration) WithOutput(value *BuildOutputApplyConfiguration) *BuildSpecApplyConfiguration {
+	b.Output = value
+	return b
+}
+
+// WithResources sets the Resources field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Resources field is set to the value of the last call.
+func (b *BuildSpecApplyConfiguration) WithResources(value corev1.ResourceRequirements) *BuildSpecApplyConfiguration {
+	b.Resources = &value
+	return b
+}
+
+// WithPostCommit sets the PostCommit field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PostCommit field is set to the value of the last call.
+func (b *BuildSpecApplyConfiguration) WithPostCommit(value *BuildPostCommitSpecApplyConfiguration) *BuildSpecApplyConfiguration {
+	b.PostCommit = value
+	return b
+}
+
+// WithCompletionDeadlineSeconds sets the CompletionDeadlineSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CompletionDeadlineSeconds field is set to the value of the last call.
+func (b *BuildSpecApplyConfiguration) WithCompletionDeadlineSeconds(value int64) *BuildSpecApplyConfiguration {
+	b.CompletionDeadlineSeconds = &value
+	return b
+}
+
+// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NodeSelector field is set to the value of the last call.
+func (b *BuildSpecApplyConfiguration) WithNodeSelector(value buildv1.OptionalNodeSelector) *BuildSpecApplyConfiguration {
+	b.NodeSelector = &value
+	return b
+}
+
+// WithMountTrustedCA sets the MountTrustedCA field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MountTrustedCA field is set to the value of the last call.
+func (b *BuildSpecApplyConfiguration) WithMountTrustedCA(value bool) *BuildSpecApplyConfiguration {
+	b.MountTrustedCA = &value
+	return b
+}
+
+// WithTriggeredBy adds the given value to the TriggeredBy field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the TriggeredBy field.
+func (b *BuildSpecApplyConfiguration) WithTriggeredBy(values ...*BuildTriggerCauseApplyConfiguration) *BuildSpecApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithTriggeredBy")
+		}
+		b.TriggeredBy = append(b.TriggeredBy, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstatus.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstatus.go
new file mode 100644
index 000000000..48a0fc557
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstatus.go
@@ -0,0 +1,149 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	time "time"
+
+	v1 "github.com/openshift/api/build/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// BuildStatusApplyConfiguration represents an declarative configuration of the BuildStatus type for use
+// with apply.
+type BuildStatusApplyConfiguration struct {
+	Phase                      *v1.BuildPhase                       `json:"phase,omitempty"`
+	Cancelled                  *bool                                `json:"cancelled,omitempty"`
+	Reason                     *v1.StatusReason                     `json:"reason,omitempty"`
+	Message                    *string                              `json:"message,omitempty"`
+	StartTimestamp             *metav1.Time                         `json:"startTimestamp,omitempty"`
+	CompletionTimestamp        *metav1.Time                         `json:"completionTimestamp,omitempty"`
+	Duration                   *time.Duration                       `json:"duration,omitempty"`
+	OutputDockerImageReference *string                              `json:"outputDockerImageReference,omitempty"`
+	Config                     *corev1.ObjectReference              `json:"config,omitempty"`
+	Output                     *BuildStatusOutputApplyConfiguration `json:"output,omitempty"`
+	Stages                     []StageInfoApplyConfiguration        `json:"stages,omitempty"`
+	LogSnippet                 *string                              `json:"logSnippet,omitempty"`
+	Conditions                 []BuildConditionApplyConfiguration   `json:"conditions,omitempty"`
+}
+
+// BuildStatusApplyConfiguration constructs an declarative configuration of the BuildStatus type for use with
+// apply.
+func BuildStatus() *BuildStatusApplyConfiguration {
+	return &BuildStatusApplyConfiguration{}
+}
+
+// WithPhase sets the Phase field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Phase field is set to the value of the last call.
+func (b *BuildStatusApplyConfiguration) WithPhase(value v1.BuildPhase) *BuildStatusApplyConfiguration {
+	b.Phase = &value
+	return b
+}
+
+// WithCancelled sets the Cancelled field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Cancelled field is set to the value of the last call.
+func (b *BuildStatusApplyConfiguration) WithCancelled(value bool) *BuildStatusApplyConfiguration {
+	b.Cancelled = &value
+	return b
+}
+
+// WithReason sets the Reason field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Reason field is set to the value of the last call.
+func (b *BuildStatusApplyConfiguration) WithReason(value v1.StatusReason) *BuildStatusApplyConfiguration {
+	b.Reason = &value
+	return b
+}
+
+// WithMessage sets the Message field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Message field is set to the value of the last call.
+func (b *BuildStatusApplyConfiguration) WithMessage(value string) *BuildStatusApplyConfiguration {
+	b.Message = &value
+	return b
+}
+
+// WithStartTimestamp sets the StartTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the StartTimestamp field is set to the value of the last call.
+func (b *BuildStatusApplyConfiguration) WithStartTimestamp(value metav1.Time) *BuildStatusApplyConfiguration {
+	b.StartTimestamp = &value
+	return b
+}
+
+// WithCompletionTimestamp sets the CompletionTimestamp field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CompletionTimestamp field is set to the value of the last call.
+func (b *BuildStatusApplyConfiguration) WithCompletionTimestamp(value metav1.Time) *BuildStatusApplyConfiguration {
+	b.CompletionTimestamp = &value
+	return b
+}
+
+// WithDuration sets the Duration field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Duration field is set to the value of the last call.
+func (b *BuildStatusApplyConfiguration) WithDuration(value time.Duration) *BuildStatusApplyConfiguration {
+	b.Duration = &value
+	return b
+}
+
+// WithOutputDockerImageReference sets the OutputDockerImageReference field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the OutputDockerImageReference field is set to the value of the last call.
+func (b *BuildStatusApplyConfiguration) WithOutputDockerImageReference(value string) *BuildStatusApplyConfiguration {
+	b.OutputDockerImageReference = &value
+	return b
+}
+
+// WithConfig sets the Config field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Config field is set to the value of the last call.
+func (b *BuildStatusApplyConfiguration) WithConfig(value corev1.ObjectReference) *BuildStatusApplyConfiguration {
+	b.Config = &value
+	return b
+}
+
+// WithOutput sets the Output field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Output field is set to the value of the last call.
+func (b *BuildStatusApplyConfiguration) WithOutput(value *BuildStatusOutputApplyConfiguration) *BuildStatusApplyConfiguration {
+	b.Output = value
+	return b
+}
+
+// WithStages adds the given value to the Stages field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Stages field.
+func (b *BuildStatusApplyConfiguration) WithStages(values ...*StageInfoApplyConfiguration) *BuildStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithStages")
+		}
+		b.Stages = append(b.Stages, *values[i])
+	}
+	return b
+}
+
+// WithLogSnippet sets the LogSnippet field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LogSnippet field is set to the value of the last call.
+func (b *BuildStatusApplyConfiguration) WithLogSnippet(value string) *BuildStatusApplyConfiguration {
+	b.LogSnippet = &value
+	return b
+}
+
+// WithConditions adds the given value to the Conditions field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Conditions field.
+func (b *BuildStatusApplyConfiguration) WithConditions(values ...*BuildConditionApplyConfiguration) *BuildStatusApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithConditions")
+		}
+		b.Conditions = append(b.Conditions, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstatusoutput.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstatusoutput.go
new file mode 100644
index 000000000..cfc79fef5
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstatusoutput.go
@@ -0,0 +1,23 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// BuildStatusOutputApplyConfiguration represents an declarative configuration of the BuildStatusOutput type for use
+// with apply.
+type BuildStatusOutputApplyConfiguration struct {
+	To *BuildStatusOutputToApplyConfiguration `json:"to,omitempty"`
+}
+
+// BuildStatusOutputApplyConfiguration constructs an declarative configuration of the BuildStatusOutput type for use with
+// apply.
+func BuildStatusOutput() *BuildStatusOutputApplyConfiguration {
+	return &BuildStatusOutputApplyConfiguration{}
+}
+
+// WithTo sets the To field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the To field is set to the value of the last call.
+func (b *BuildStatusOutputApplyConfiguration) WithTo(value *BuildStatusOutputToApplyConfiguration) *BuildStatusOutputApplyConfiguration {
+	b.To = value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstatusoutputto.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstatusoutputto.go
new file mode 100644
index 000000000..beafa5a95
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstatusoutputto.go
@@ -0,0 +1,23 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// BuildStatusOutputToApplyConfiguration represents an declarative configuration of the BuildStatusOutputTo type for use
+// with apply.
+type BuildStatusOutputToApplyConfiguration struct {
+	ImageDigest *string `json:"imageDigest,omitempty"`
+}
+
+// BuildStatusOutputToApplyConfiguration constructs an declarative configuration of the BuildStatusOutputTo type for use with
+// apply.
+func BuildStatusOutputTo() *BuildStatusOutputToApplyConfiguration {
+	return &BuildStatusOutputToApplyConfiguration{}
+}
+
+// WithImageDigest sets the ImageDigest field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ImageDigest field is set to the value of the last call.
+func (b *BuildStatusOutputToApplyConfiguration) WithImageDigest(value string) *BuildStatusOutputToApplyConfiguration {
+	b.ImageDigest = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstrategy.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstrategy.go
new file mode 100644
index 000000000..d1fe4f5cf
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildstrategy.go
@@ -0,0 +1,63 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	v1 "github.com/openshift/api/build/v1"
+)
+
+// BuildStrategyApplyConfiguration represents an declarative configuration of the BuildStrategy type for use
+// with apply.
+type BuildStrategyApplyConfiguration struct {
+	Type                    *v1.BuildStrategyType                           `json:"type,omitempty"`
+	DockerStrategy          *DockerBuildStrategyApplyConfiguration          `json:"dockerStrategy,omitempty"`
+	SourceStrategy          *SourceBuildStrategyApplyConfiguration          `json:"sourceStrategy,omitempty"`
+	CustomStrategy          *CustomBuildStrategyApplyConfiguration          `json:"customStrategy,omitempty"`
+	JenkinsPipelineStrategy *JenkinsPipelineBuildStrategyApplyConfiguration `json:"jenkinsPipelineStrategy,omitempty"`
+}
+
+// BuildStrategyApplyConfiguration constructs an declarative configuration of the BuildStrategy type for use with
+// apply.
+func BuildStrategy() *BuildStrategyApplyConfiguration {
+	return &BuildStrategyApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *BuildStrategyApplyConfiguration) WithType(value v1.BuildStrategyType) *BuildStrategyApplyConfiguration {
+	b.Type = &value
+	return b
+}
+
+// WithDockerStrategy sets the DockerStrategy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DockerStrategy field is set to the value of the last call.
+func (b *BuildStrategyApplyConfiguration) WithDockerStrategy(value *DockerBuildStrategyApplyConfiguration) *BuildStrategyApplyConfiguration {
+	b.DockerStrategy = value
+	return b
+}
+
+// WithSourceStrategy sets the SourceStrategy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the SourceStrategy field is set to the value of the last call.
+func (b *BuildStrategyApplyConfiguration) WithSourceStrategy(value *SourceBuildStrategyApplyConfiguration) *BuildStrategyApplyConfiguration {
+	b.SourceStrategy = value
+	return b
+}
+
+// WithCustomStrategy sets the CustomStrategy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CustomStrategy field is set to the value of the last call.
+func (b *BuildStrategyApplyConfiguration) WithCustomStrategy(value *CustomBuildStrategyApplyConfiguration) *BuildStrategyApplyConfiguration {
+	b.CustomStrategy = value
+	return b
+}
+
+// WithJenkinsPipelineStrategy sets the JenkinsPipelineStrategy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the JenkinsPipelineStrategy field is set to the value of the last call.
+func (b *BuildStrategyApplyConfiguration) WithJenkinsPipelineStrategy(value *JenkinsPipelineBuildStrategyApplyConfiguration) *BuildStrategyApplyConfiguration {
+	b.JenkinsPipelineStrategy = value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildtriggercause.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildtriggercause.go
new file mode 100644
index 000000000..285aa6f61
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildtriggercause.go
@@ -0,0 +1,68 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// BuildTriggerCauseApplyConfiguration represents an declarative configuration of the BuildTriggerCause type for use
+// with apply.
+type BuildTriggerCauseApplyConfiguration struct {
+	Message          *string                                  `json:"message,omitempty"`
+	GenericWebHook   *GenericWebHookCauseApplyConfiguration   `json:"genericWebHook,omitempty"`
+	GitHubWebHook    *GitHubWebHookCauseApplyConfiguration    `json:"githubWebHook,omitempty"`
+	ImageChangeBuild *ImageChangeCauseApplyConfiguration      `json:"imageChangeBuild,omitempty"`
+	GitLabWebHook    *GitLabWebHookCauseApplyConfiguration    `json:"gitlabWebHook,omitempty"`
+	BitbucketWebHook *BitbucketWebHookCauseApplyConfiguration `json:"bitbucketWebHook,omitempty"`
+}
+
+// BuildTriggerCauseApplyConfiguration constructs an declarative configuration of the BuildTriggerCause type for use with
+// apply.
+func BuildTriggerCause() *BuildTriggerCauseApplyConfiguration {
+	return &BuildTriggerCauseApplyConfiguration{}
+}
+
+// WithMessage sets the Message field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Message field is set to the value of the last call.
+func (b *BuildTriggerCauseApplyConfiguration) WithMessage(value string) *BuildTriggerCauseApplyConfiguration {
+	b.Message = &value
+	return b
+}
+
+// WithGenericWebHook sets the GenericWebHook field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenericWebHook field is set to the value of the last call.
+func (b *BuildTriggerCauseApplyConfiguration) WithGenericWebHook(value *GenericWebHookCauseApplyConfiguration) *BuildTriggerCauseApplyConfiguration {
+	b.GenericWebHook = value
+	return b
+}
+
+// WithGitHubWebHook sets the GitHubWebHook field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GitHubWebHook field is set to the value of the last call.
+func (b *BuildTriggerCauseApplyConfiguration) WithGitHubWebHook(value *GitHubWebHookCauseApplyConfiguration) *BuildTriggerCauseApplyConfiguration {
+	b.GitHubWebHook = value
+	return b
+}
+
+// WithImageChangeBuild sets the ImageChangeBuild field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ImageChangeBuild field is set to the value of the last call.
+func (b *BuildTriggerCauseApplyConfiguration) WithImageChangeBuild(value *ImageChangeCauseApplyConfiguration) *BuildTriggerCauseApplyConfiguration {
+	b.ImageChangeBuild = value
+	return b
+}
+
+// WithGitLabWebHook sets the GitLabWebHook field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GitLabWebHook field is set to the value of the last call.
+func (b *BuildTriggerCauseApplyConfiguration) WithGitLabWebHook(value *GitLabWebHookCauseApplyConfiguration) *BuildTriggerCauseApplyConfiguration {
+	b.GitLabWebHook = value
+	return b
+}
+
+// WithBitbucketWebHook sets the BitbucketWebHook field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the BitbucketWebHook field is set to the value of the last call.
+func (b *BuildTriggerCauseApplyConfiguration) WithBitbucketWebHook(value *BitbucketWebHookCauseApplyConfiguration) *BuildTriggerCauseApplyConfiguration {
+	b.BitbucketWebHook = value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildtriggerpolicy.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildtriggerpolicy.go
new file mode 100644
index 000000000..befc33df5
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildtriggerpolicy.go
@@ -0,0 +1,72 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	v1 "github.com/openshift/api/build/v1"
+)
+
+// BuildTriggerPolicyApplyConfiguration represents an declarative configuration of the BuildTriggerPolicy type for use
+// with apply.
+type BuildTriggerPolicyApplyConfiguration struct {
+	Type             *v1.BuildTriggerType                  `json:"type,omitempty"`
+	GitHubWebHook    *WebHookTriggerApplyConfiguration     `json:"github,omitempty"`
+	GenericWebHook   *WebHookTriggerApplyConfiguration     `json:"generic,omitempty"`
+	ImageChange      *ImageChangeTriggerApplyConfiguration `json:"imageChange,omitempty"`
+	GitLabWebHook    *WebHookTriggerApplyConfiguration     `json:"gitlab,omitempty"`
+	BitbucketWebHook *WebHookTriggerApplyConfiguration     `json:"bitbucket,omitempty"`
+}
+
+// BuildTriggerPolicyApplyConfiguration constructs an declarative configuration of the BuildTriggerPolicy type for use with
+// apply.
+func BuildTriggerPolicy() *BuildTriggerPolicyApplyConfiguration {
+	return &BuildTriggerPolicyApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *BuildTriggerPolicyApplyConfiguration) WithType(value v1.BuildTriggerType) *BuildTriggerPolicyApplyConfiguration {
+	b.Type = &value
+	return b
+}
+
+// WithGitHubWebHook sets the GitHubWebHook field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GitHubWebHook field is set to the value of the last call.
+func (b *BuildTriggerPolicyApplyConfiguration) WithGitHubWebHook(value *WebHookTriggerApplyConfiguration) *BuildTriggerPolicyApplyConfiguration {
+	b.GitHubWebHook = value
+	return b
+}
+
+// WithGenericWebHook sets the GenericWebHook field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GenericWebHook field is set to the value of the last call.
+func (b *BuildTriggerPolicyApplyConfiguration) WithGenericWebHook(value *WebHookTriggerApplyConfiguration) *BuildTriggerPolicyApplyConfiguration {
+	b.GenericWebHook = value
+	return b
+}
+
+// WithImageChange sets the ImageChange field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ImageChange field is set to the value of the last call.
+func (b *BuildTriggerPolicyApplyConfiguration) WithImageChange(value *ImageChangeTriggerApplyConfiguration) *BuildTriggerPolicyApplyConfiguration {
+	b.ImageChange = value
+	return b
+}
+
+// WithGitLabWebHook sets the GitLabWebHook field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the GitLabWebHook field is set to the value of the last call.
+func (b *BuildTriggerPolicyApplyConfiguration) WithGitLabWebHook(value *WebHookTriggerApplyConfiguration) *BuildTriggerPolicyApplyConfiguration {
+	b.GitLabWebHook = value
+	return b
+}
+
+// WithBitbucketWebHook sets the BitbucketWebHook field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the BitbucketWebHook field is set to the value of the last call.
+func (b *BuildTriggerPolicyApplyConfiguration) WithBitbucketWebHook(value *WebHookTriggerApplyConfiguration) *BuildTriggerPolicyApplyConfiguration {
+	b.BitbucketWebHook = value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildvolume.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildvolume.go
new file mode 100644
index 000000000..6f3bdf1b7
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildvolume.go
@@ -0,0 +1,46 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// BuildVolumeApplyConfiguration represents an declarative configuration of the BuildVolume type for use
+// with apply.
+type BuildVolumeApplyConfiguration struct {
+	Name   *string                              `json:"name,omitempty"`
+	Source *BuildVolumeSourceApplyConfiguration `json:"source,omitempty"`
+	Mounts []BuildVolumeMountApplyConfiguration `json:"mounts,omitempty"`
+}
+
+// BuildVolumeApplyConfiguration constructs an declarative configuration of the BuildVolume type for use with
+// apply.
+func BuildVolume() *BuildVolumeApplyConfiguration {
+	return &BuildVolumeApplyConfiguration{}
+}
+
+// WithName sets the Name field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Name field is set to the value of the last call.
+func (b *BuildVolumeApplyConfiguration) WithName(value string) *BuildVolumeApplyConfiguration {
+	b.Name = &value
+	return b
+}
+
+// WithSource sets the Source field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Source field is set to the value of the last call.
+func (b *BuildVolumeApplyConfiguration) WithSource(value *BuildVolumeSourceApplyConfiguration) *BuildVolumeApplyConfiguration {
+	b.Source = value
+	return b
+}
+
+// WithMounts adds the given value to the Mounts field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the Mounts field.
+func (b *BuildVolumeApplyConfiguration) WithMounts(values ...*BuildVolumeMountApplyConfiguration) *BuildVolumeApplyConfiguration {
+	for i := range values {
+		if values[i] == nil {
+			panic("nil value passed to WithMounts")
+		}
+		b.Mounts = append(b.Mounts, *values[i])
+	}
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildvolumemount.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildvolumemount.go
new file mode 100644
index 000000000..15686ad2d
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildvolumemount.go
@@ -0,0 +1,23 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// BuildVolumeMountApplyConfiguration represents an declarative configuration of the BuildVolumeMount type for use
+// with apply.
+type BuildVolumeMountApplyConfiguration struct {
+	DestinationPath *string `json:"destinationPath,omitempty"`
+}
+
+// BuildVolumeMountApplyConfiguration constructs an declarative configuration of the BuildVolumeMount type for use with
+// apply.
+func BuildVolumeMount() *BuildVolumeMountApplyConfiguration {
+	return &BuildVolumeMountApplyConfiguration{}
+}
+
+// WithDestinationPath sets the DestinationPath field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DestinationPath field is set to the value of the last call.
+func (b *BuildVolumeMountApplyConfiguration) WithDestinationPath(value string) *BuildVolumeMountApplyConfiguration {
+	b.DestinationPath = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildvolumesource.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildvolumesource.go
new file mode 100644
index 000000000..0ae9256ca
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/buildvolumesource.go
@@ -0,0 +1,55 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	v1 "github.com/openshift/api/build/v1"
+	corev1 "k8s.io/api/core/v1"
+)
+
+// BuildVolumeSourceApplyConfiguration represents an declarative configuration of the BuildVolumeSource type for use
+// with apply.
+type BuildVolumeSourceApplyConfiguration struct {
+	Type      *v1.BuildVolumeSourceType     `json:"type,omitempty"`
+	Secret    *corev1.SecretVolumeSource    `json:"secret,omitempty"`
+	ConfigMap *corev1.ConfigMapVolumeSource `json:"configMap,omitempty"`
+	CSI       *corev1.CSIVolumeSource       `json:"csi,omitempty"`
+}
+
+// BuildVolumeSourceApplyConfiguration constructs an declarative configuration of the BuildVolumeSource type for use with
+// apply.
+func BuildVolumeSource() *BuildVolumeSourceApplyConfiguration {
+	return &BuildVolumeSourceApplyConfiguration{}
+}
+
+// WithType sets the Type field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Type field is set to the value of the last call.
+func (b *BuildVolumeSourceApplyConfiguration) WithType(value v1.BuildVolumeSourceType) *BuildVolumeSourceApplyConfiguration {
+	b.Type = &value
+	return b
+}
+
+// WithSecret sets the Secret field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Secret field is set to the value of the last call.
+func (b *BuildVolumeSourceApplyConfiguration) WithSecret(value corev1.SecretVolumeSource) *BuildVolumeSourceApplyConfiguration {
+	b.Secret = &value
+	return b
+}
+
+// WithConfigMap sets the ConfigMap field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ConfigMap field is set to the value of the last call.
+func (b *BuildVolumeSourceApplyConfiguration) WithConfigMap(value corev1.ConfigMapVolumeSource) *BuildVolumeSourceApplyConfiguration {
+	b.ConfigMap = &value
+	return b
+}
+
+// WithCSI sets the CSI field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CSI field is set to the value of the last call.
+func (b *BuildVolumeSourceApplyConfiguration) WithCSI(value corev1.CSIVolumeSource) *BuildVolumeSourceApplyConfiguration {
+	b.CSI = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/commonspec.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/commonspec.go
new file mode 100644
index 000000000..0e9aa3d22
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/commonspec.go
@@ -0,0 +1,109 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	buildv1 "github.com/openshift/api/build/v1"
+	corev1 "k8s.io/api/core/v1"
+)
+
+// CommonSpecApplyConfiguration represents an declarative configuration of the CommonSpec type for use
+// with apply.
+type CommonSpecApplyConfiguration struct {
+	ServiceAccount            *string                                `json:"serviceAccount,omitempty"`
+	Source                    *BuildSourceApplyConfiguration         `json:"source,omitempty"`
+	Revision                  *SourceRevisionApplyConfiguration      `json:"revision,omitempty"`
+	Strategy                  *BuildStrategyApplyConfiguration       `json:"strategy,omitempty"`
+	Output                    *BuildOutputApplyConfiguration         `json:"output,omitempty"`
+	Resources                 *corev1.ResourceRequirements           `json:"resources,omitempty"`
+	PostCommit                *BuildPostCommitSpecApplyConfiguration `json:"postCommit,omitempty"`
+	CompletionDeadlineSeconds *int64                                 `json:"completionDeadlineSeconds,omitempty"`
+	NodeSelector              *buildv1.OptionalNodeSelector          `json:"nodeSelector,omitempty"`
+	MountTrustedCA            *bool                                  `json:"mountTrustedCA,omitempty"`
+}
+
+// CommonSpecApplyConfiguration constructs an declarative configuration of the CommonSpec type for use with
+// apply.
+func CommonSpec() *CommonSpecApplyConfiguration {
+	return &CommonSpecApplyConfiguration{}
+}
+
+// WithServiceAccount sets the ServiceAccount field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ServiceAccount field is set to the value of the last call.
+func (b *CommonSpecApplyConfiguration) WithServiceAccount(value string) *CommonSpecApplyConfiguration {
+	b.ServiceAccount = &value
+	return b
+}
+
+// WithSource sets the Source field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Source field is set to the value of the last call.
+func (b *CommonSpecApplyConfiguration) WithSource(value *BuildSourceApplyConfiguration) *CommonSpecApplyConfiguration {
+	b.Source = value
+	return b
+}
+
+// WithRevision sets the Revision field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Revision field is set to the value of the last call.
+func (b *CommonSpecApplyConfiguration) WithRevision(value *SourceRevisionApplyConfiguration) *CommonSpecApplyConfiguration {
+	b.Revision = value
+	return b
+}
+
+// WithStrategy sets the Strategy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Strategy field is set to the value of the last call.
+func (b *CommonSpecApplyConfiguration) WithStrategy(value *BuildStrategyApplyConfiguration) *CommonSpecApplyConfiguration {
+	b.Strategy = value
+	return b
+}
+
+// WithOutput sets the Output field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Output field is set to the value of the last call.
+func (b *CommonSpecApplyConfiguration) WithOutput(value *BuildOutputApplyConfiguration) *CommonSpecApplyConfiguration {
+	b.Output = value
+	return b
+}
+
+// WithResources sets the Resources field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Resources field is set to the value of the last call.
+func (b *CommonSpecApplyConfiguration) WithResources(value corev1.ResourceRequirements) *CommonSpecApplyConfiguration {
+	b.Resources = &value
+	return b
+}
+
+// WithPostCommit sets the PostCommit field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the PostCommit field is set to the value of the last call.
+func (b *CommonSpecApplyConfiguration) WithPostCommit(value *BuildPostCommitSpecApplyConfiguration) *CommonSpecApplyConfiguration {
+	b.PostCommit = value
+	return b
+}
+
+// WithCompletionDeadlineSeconds sets the CompletionDeadlineSeconds field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the CompletionDeadlineSeconds field is set to the value of the last call.
+func (b *CommonSpecApplyConfiguration) WithCompletionDeadlineSeconds(value int64) *CommonSpecApplyConfiguration {
+	b.CompletionDeadlineSeconds = &value
+	return b
+}
+
+// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NodeSelector field is set to the value of the last call.
+func (b *CommonSpecApplyConfiguration) WithNodeSelector(value buildv1.OptionalNodeSelector) *CommonSpecApplyConfiguration {
+	b.NodeSelector = &value
+	return b
+}
+
+// WithMountTrustedCA sets the MountTrustedCA field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MountTrustedCA field is set to the value of the last call.
+func (b *CommonSpecApplyConfiguration) WithMountTrustedCA(value bool) *CommonSpecApplyConfiguration {
+	b.MountTrustedCA = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/commonwebhookcause.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/commonwebhookcause.go
new file mode 100644
index 000000000..207dfda41
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/commonwebhookcause.go
@@ -0,0 +1,32 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+// CommonWebHookCauseApplyConfiguration represents an declarative configuration of the CommonWebHookCause type for use
+// with apply.
+type CommonWebHookCauseApplyConfiguration struct {
+	Revision *SourceRevisionApplyConfiguration `json:"revision,omitempty"`
+	Secret   *string                           `json:"secret,omitempty"`
+}
+
+// CommonWebHookCauseApplyConfiguration constructs an declarative configuration of the CommonWebHookCause type for use with
+// apply.
+func CommonWebHookCause() *CommonWebHookCauseApplyConfiguration {
+	return &CommonWebHookCauseApplyConfiguration{}
+}
+
+// WithRevision sets the Revision field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Revision field is set to the value of the last call.
+func (b *CommonWebHookCauseApplyConfiguration) WithRevision(value *SourceRevisionApplyConfiguration) *CommonWebHookCauseApplyConfiguration {
+	b.Revision = value
+	return b
+}
+
+// WithSecret sets the Secret field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Secret field is set to the value of the last call.
+func (b *CommonWebHookCauseApplyConfiguration) WithSecret(value string) *CommonWebHookCauseApplyConfiguration {
+	b.Secret = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/configmapbuildsource.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/configmapbuildsource.go
new file mode 100644
index 000000000..1ca6a1579
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/configmapbuildsource.go
@@ -0,0 +1,36 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	v1 "k8s.io/api/core/v1"
+)
+
+// ConfigMapBuildSourceApplyConfiguration represents an declarative configuration of the ConfigMapBuildSource type for use
+// with apply.
+type ConfigMapBuildSourceApplyConfiguration struct {
+	ConfigMap      *v1.LocalObjectReference `json:"configMap,omitempty"`
+	DestinationDir *string                  `json:"destinationDir,omitempty"`
+}
+
+// ConfigMapBuildSourceApplyConfiguration constructs an declarative configuration of the ConfigMapBuildSource type for use with
+// apply.
+func ConfigMapBuildSource() *ConfigMapBuildSourceApplyConfiguration {
+	return &ConfigMapBuildSourceApplyConfiguration{}
+}
+
+// WithConfigMap sets the ConfigMap field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the ConfigMap field is set to the value of the last call.
+func (b *ConfigMapBuildSourceApplyConfiguration) WithConfigMap(value v1.LocalObjectReference) *ConfigMapBuildSourceApplyConfiguration {
+	b.ConfigMap = &value
+	return b
+}
+
+// WithDestinationDir sets the DestinationDir field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the DestinationDir field is set to the value of the last call.
+func (b *ConfigMapBuildSourceApplyConfiguration) WithDestinationDir(value string) *ConfigMapBuildSourceApplyConfiguration {
+	b.DestinationDir = &value
+	return b
+}
diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/custombuildstrategy.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/custombuildstrategy.go
new file mode 100644
index 000000000..c7bf1b2b6
--- /dev/null
+++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/custombuildstrategy.go
@@ -0,0 +1,88 @@
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+ +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// CustomBuildStrategyApplyConfiguration represents an declarative configuration of the CustomBuildStrategy type for use +// with apply. +type CustomBuildStrategyApplyConfiguration struct { + From *v1.ObjectReference `json:"from,omitempty"` + PullSecret *v1.LocalObjectReference `json:"pullSecret,omitempty"` + Env []v1.EnvVar `json:"env,omitempty"` + ExposeDockerSocket *bool `json:"exposeDockerSocket,omitempty"` + ForcePull *bool `json:"forcePull,omitempty"` + Secrets []SecretSpecApplyConfiguration `json:"secrets,omitempty"` + BuildAPIVersion *string `json:"buildAPIVersion,omitempty"` +} + +// CustomBuildStrategyApplyConfiguration constructs an declarative configuration of the CustomBuildStrategy type for use with +// apply. +func CustomBuildStrategy() *CustomBuildStrategyApplyConfiguration { + return &CustomBuildStrategyApplyConfiguration{} +} + +// WithFrom sets the From field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the From field is set to the value of the last call. +func (b *CustomBuildStrategyApplyConfiguration) WithFrom(value v1.ObjectReference) *CustomBuildStrategyApplyConfiguration { + b.From = &value + return b +} + +// WithPullSecret sets the PullSecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PullSecret field is set to the value of the last call. +func (b *CustomBuildStrategyApplyConfiguration) WithPullSecret(value v1.LocalObjectReference) *CustomBuildStrategyApplyConfiguration { + b.PullSecret = &value + return b +} + +// WithEnv adds the given value to the Env field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Env field. +func (b *CustomBuildStrategyApplyConfiguration) WithEnv(values ...v1.EnvVar) *CustomBuildStrategyApplyConfiguration { + for i := range values { + b.Env = append(b.Env, values[i]) + } + return b +} + +// WithExposeDockerSocket sets the ExposeDockerSocket field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ExposeDockerSocket field is set to the value of the last call. +func (b *CustomBuildStrategyApplyConfiguration) WithExposeDockerSocket(value bool) *CustomBuildStrategyApplyConfiguration { + b.ExposeDockerSocket = &value + return b +} + +// WithForcePull sets the ForcePull field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ForcePull field is set to the value of the last call. +func (b *CustomBuildStrategyApplyConfiguration) WithForcePull(value bool) *CustomBuildStrategyApplyConfiguration { + b.ForcePull = &value + return b +} + +// WithSecrets adds the given value to the Secrets field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Secrets field. 
+func (b *CustomBuildStrategyApplyConfiguration) WithSecrets(values ...*SecretSpecApplyConfiguration) *CustomBuildStrategyApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSecrets") + } + b.Secrets = append(b.Secrets, *values[i]) + } + return b +} + +// WithBuildAPIVersion sets the BuildAPIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the BuildAPIVersion field is set to the value of the last call. +func (b *CustomBuildStrategyApplyConfiguration) WithBuildAPIVersion(value string) *CustomBuildStrategyApplyConfiguration { + b.BuildAPIVersion = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/dockerbuildstrategy.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/dockerbuildstrategy.go new file mode 100644 index 000000000..36ec78b02 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/dockerbuildstrategy.go @@ -0,0 +1,109 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + buildv1 "github.com/openshift/api/build/v1" + v1 "k8s.io/api/core/v1" +) + +// DockerBuildStrategyApplyConfiguration represents an declarative configuration of the DockerBuildStrategy type for use +// with apply. +type DockerBuildStrategyApplyConfiguration struct { + From *v1.ObjectReference `json:"from,omitempty"` + PullSecret *v1.LocalObjectReference `json:"pullSecret,omitempty"` + NoCache *bool `json:"noCache,omitempty"` + Env []v1.EnvVar `json:"env,omitempty"` + ForcePull *bool `json:"forcePull,omitempty"` + DockerfilePath *string `json:"dockerfilePath,omitempty"` + BuildArgs []v1.EnvVar `json:"buildArgs,omitempty"` + ImageOptimizationPolicy *buildv1.ImageOptimizationPolicy `json:"imageOptimizationPolicy,omitempty"` + Volumes []BuildVolumeApplyConfiguration `json:"volumes,omitempty"` +} + +// DockerBuildStrategyApplyConfiguration constructs an declarative configuration of the DockerBuildStrategy type for use with +// apply. +func DockerBuildStrategy() *DockerBuildStrategyApplyConfiguration { + return &DockerBuildStrategyApplyConfiguration{} +} + +// WithFrom sets the From field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the From field is set to the value of the last call. +func (b *DockerBuildStrategyApplyConfiguration) WithFrom(value v1.ObjectReference) *DockerBuildStrategyApplyConfiguration { + b.From = &value + return b +} + +// WithPullSecret sets the PullSecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PullSecret field is set to the value of the last call. +func (b *DockerBuildStrategyApplyConfiguration) WithPullSecret(value v1.LocalObjectReference) *DockerBuildStrategyApplyConfiguration { + b.PullSecret = &value + return b +} + +// WithNoCache sets the NoCache field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NoCache field is set to the value of the last call. 
+func (b *DockerBuildStrategyApplyConfiguration) WithNoCache(value bool) *DockerBuildStrategyApplyConfiguration { + b.NoCache = &value + return b +} + +// WithEnv adds the given value to the Env field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Env field. +func (b *DockerBuildStrategyApplyConfiguration) WithEnv(values ...v1.EnvVar) *DockerBuildStrategyApplyConfiguration { + for i := range values { + b.Env = append(b.Env, values[i]) + } + return b +} + +// WithForcePull sets the ForcePull field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ForcePull field is set to the value of the last call. +func (b *DockerBuildStrategyApplyConfiguration) WithForcePull(value bool) *DockerBuildStrategyApplyConfiguration { + b.ForcePull = &value + return b +} + +// WithDockerfilePath sets the DockerfilePath field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DockerfilePath field is set to the value of the last call. +func (b *DockerBuildStrategyApplyConfiguration) WithDockerfilePath(value string) *DockerBuildStrategyApplyConfiguration { + b.DockerfilePath = &value + return b +} + +// WithBuildArgs adds the given value to the BuildArgs field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the BuildArgs field. +func (b *DockerBuildStrategyApplyConfiguration) WithBuildArgs(values ...v1.EnvVar) *DockerBuildStrategyApplyConfiguration { + for i := range values { + b.BuildArgs = append(b.BuildArgs, values[i]) + } + return b +} + +// WithImageOptimizationPolicy sets the ImageOptimizationPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ImageOptimizationPolicy field is set to the value of the last call. +func (b *DockerBuildStrategyApplyConfiguration) WithImageOptimizationPolicy(value buildv1.ImageOptimizationPolicy) *DockerBuildStrategyApplyConfiguration { + b.ImageOptimizationPolicy = &value + return b +} + +// WithVolumes adds the given value to the Volumes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Volumes field. 
+func (b *DockerBuildStrategyApplyConfiguration) WithVolumes(values ...*BuildVolumeApplyConfiguration) *DockerBuildStrategyApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithVolumes") + } + b.Volumes = append(b.Volumes, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/genericwebhookcause.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/genericwebhookcause.go new file mode 100644 index 000000000..4a6cab48f --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/genericwebhookcause.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// GenericWebHookCauseApplyConfiguration represents an declarative configuration of the GenericWebHookCause type for use +// with apply. +type GenericWebHookCauseApplyConfiguration struct { + Revision *SourceRevisionApplyConfiguration `json:"revision,omitempty"` + Secret *string `json:"secret,omitempty"` +} + +// GenericWebHookCauseApplyConfiguration constructs an declarative configuration of the GenericWebHookCause type for use with +// apply. +func GenericWebHookCause() *GenericWebHookCauseApplyConfiguration { + return &GenericWebHookCauseApplyConfiguration{} +} + +// WithRevision sets the Revision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Revision field is set to the value of the last call. +func (b *GenericWebHookCauseApplyConfiguration) WithRevision(value *SourceRevisionApplyConfiguration) *GenericWebHookCauseApplyConfiguration { + b.Revision = value + return b +} + +// WithSecret sets the Secret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Secret field is set to the value of the last call. +func (b *GenericWebHookCauseApplyConfiguration) WithSecret(value string) *GenericWebHookCauseApplyConfiguration { + b.Secret = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/gitbuildsource.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/gitbuildsource.go new file mode 100644 index 000000000..735a39771 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/gitbuildsource.go @@ -0,0 +1,57 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// GitBuildSourceApplyConfiguration represents an declarative configuration of the GitBuildSource type for use +// with apply. +type GitBuildSourceApplyConfiguration struct { + URI *string `json:"uri,omitempty"` + Ref *string `json:"ref,omitempty"` + ProxyConfigApplyConfiguration `json:",inline"` +} + +// GitBuildSourceApplyConfiguration constructs an declarative configuration of the GitBuildSource type for use with +// apply. +func GitBuildSource() *GitBuildSourceApplyConfiguration { + return &GitBuildSourceApplyConfiguration{} +} + +// WithURI sets the URI field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the URI field is set to the value of the last call. 
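Worth highlighting from the generated comments above: the scalar setters such as WithNoCache overwrite on repeated calls, while the slice setters such as WithEnv and WithBuildArgs append across calls. A short self-contained sketch with made-up values:

package example // illustrative sketch only

import (
	buildv1 "github.com/openshift/client-go/build/applyconfigurations/build/v1"
	corev1 "k8s.io/api/core/v1"
)

func dockerStrategy() *buildv1.DockerBuildStrategyApplyConfiguration {
	return buildv1.DockerBuildStrategy().
		WithDockerfilePath("images/app/Dockerfile"). // hypothetical path
		WithNoCache(true).
		// WithEnv appends rather than replaces, so both variables are kept.
		WithEnv(corev1.EnvVar{Name: "GIT_SSL_NO_VERIFY", Value: "false"}).
		WithEnv(corev1.EnvVar{Name: "HTTP_TIMEOUT", Value: "30"})
}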
+func (b *GitBuildSourceApplyConfiguration) WithURI(value string) *GitBuildSourceApplyConfiguration { + b.URI = &value + return b +} + +// WithRef sets the Ref field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Ref field is set to the value of the last call. +func (b *GitBuildSourceApplyConfiguration) WithRef(value string) *GitBuildSourceApplyConfiguration { + b.Ref = &value + return b +} + +// WithHTTPProxy sets the HTTPProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTPProxy field is set to the value of the last call. +func (b *GitBuildSourceApplyConfiguration) WithHTTPProxy(value string) *GitBuildSourceApplyConfiguration { + b.HTTPProxy = &value + return b +} + +// WithHTTPSProxy sets the HTTPSProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTPSProxy field is set to the value of the last call. +func (b *GitBuildSourceApplyConfiguration) WithHTTPSProxy(value string) *GitBuildSourceApplyConfiguration { + b.HTTPSProxy = &value + return b +} + +// WithNoProxy sets the NoProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NoProxy field is set to the value of the last call. +func (b *GitBuildSourceApplyConfiguration) WithNoProxy(value string) *GitBuildSourceApplyConfiguration { + b.NoProxy = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/githubwebhookcause.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/githubwebhookcause.go new file mode 100644 index 000000000..b43034ee8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/githubwebhookcause.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// GitHubWebHookCauseApplyConfiguration represents an declarative configuration of the GitHubWebHookCause type for use +// with apply. +type GitHubWebHookCauseApplyConfiguration struct { + Revision *SourceRevisionApplyConfiguration `json:"revision,omitempty"` + Secret *string `json:"secret,omitempty"` +} + +// GitHubWebHookCauseApplyConfiguration constructs an declarative configuration of the GitHubWebHookCause type for use with +// apply. +func GitHubWebHookCause() *GitHubWebHookCauseApplyConfiguration { + return &GitHubWebHookCauseApplyConfiguration{} +} + +// WithRevision sets the Revision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Revision field is set to the value of the last call. +func (b *GitHubWebHookCauseApplyConfiguration) WithRevision(value *SourceRevisionApplyConfiguration) *GitHubWebHookCauseApplyConfiguration { + b.Revision = value + return b +} + +// WithSecret sets the Secret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
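Note the embedding above: GitBuildSourceApplyConfiguration inlines ProxyConfigApplyConfiguration (json:",inline"), and the generator emits the proxy setters directly on the outer type, so proxy settings chain alongside the Git fields. A sketch with placeholder values:

package example // illustrative sketch only

import buildv1 "github.com/openshift/client-go/build/applyconfigurations/build/v1"

func gitSource() *buildv1.GitBuildSourceApplyConfiguration {
	return buildv1.GitBuildSource().
		WithURI("https://github.com/example/app.git"). // placeholder repository
		WithRef("main").
		// Writes into the inlined ProxyConfigApplyConfiguration.
		WithHTTPSProxy("http://proxy.internal:3128")
}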
+// If called multiple times, the Secret field is set to the value of the last call. +func (b *GitHubWebHookCauseApplyConfiguration) WithSecret(value string) *GitHubWebHookCauseApplyConfiguration { + b.Secret = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/gitlabwebhookcause.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/gitlabwebhookcause.go new file mode 100644 index 000000000..790378302 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/gitlabwebhookcause.go @@ -0,0 +1,31 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// GitLabWebHookCauseApplyConfiguration represents an declarative configuration of the GitLabWebHookCause type for use +// with apply. +type GitLabWebHookCauseApplyConfiguration struct { + CommonWebHookCauseApplyConfiguration `json:",inline"` +} + +// GitLabWebHookCauseApplyConfiguration constructs an declarative configuration of the GitLabWebHookCause type for use with +// apply. +func GitLabWebHookCause() *GitLabWebHookCauseApplyConfiguration { + return &GitLabWebHookCauseApplyConfiguration{} +} + +// WithRevision sets the Revision field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Revision field is set to the value of the last call. +func (b *GitLabWebHookCauseApplyConfiguration) WithRevision(value *SourceRevisionApplyConfiguration) *GitLabWebHookCauseApplyConfiguration { + b.Revision = value + return b +} + +// WithSecret sets the Secret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Secret field is set to the value of the last call. +func (b *GitLabWebHookCauseApplyConfiguration) WithSecret(value string) *GitLabWebHookCauseApplyConfiguration { + b.Secret = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/gitsourcerevision.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/gitsourcerevision.go new file mode 100644 index 000000000..acc0c4658 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/gitsourcerevision.go @@ -0,0 +1,50 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// GitSourceRevisionApplyConfiguration represents an declarative configuration of the GitSourceRevision type for use +// with apply. +type GitSourceRevisionApplyConfiguration struct { + Commit *string `json:"commit,omitempty"` + Author *SourceControlUserApplyConfiguration `json:"author,omitempty"` + Committer *SourceControlUserApplyConfiguration `json:"committer,omitempty"` + Message *string `json:"message,omitempty"` +} + +// GitSourceRevisionApplyConfiguration constructs an declarative configuration of the GitSourceRevision type for use with +// apply. +func GitSourceRevision() *GitSourceRevisionApplyConfiguration { + return &GitSourceRevisionApplyConfiguration{} +} + +// WithCommit sets the Commit field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Commit field is set to the value of the last call. 
+func (b *GitSourceRevisionApplyConfiguration) WithCommit(value string) *GitSourceRevisionApplyConfiguration { + b.Commit = &value + return b +} + +// WithAuthor sets the Author field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Author field is set to the value of the last call. +func (b *GitSourceRevisionApplyConfiguration) WithAuthor(value *SourceControlUserApplyConfiguration) *GitSourceRevisionApplyConfiguration { + b.Author = value + return b +} + +// WithCommitter sets the Committer field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Committer field is set to the value of the last call. +func (b *GitSourceRevisionApplyConfiguration) WithCommitter(value *SourceControlUserApplyConfiguration) *GitSourceRevisionApplyConfiguration { + b.Committer = value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *GitSourceRevisionApplyConfiguration) WithMessage(value string) *GitSourceRevisionApplyConfiguration { + b.Message = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagechangecause.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagechangecause.go new file mode 100644 index 000000000..e84dd8587 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagechangecause.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// ImageChangeCauseApplyConfiguration represents an declarative configuration of the ImageChangeCause type for use +// with apply. +type ImageChangeCauseApplyConfiguration struct { + ImageID *string `json:"imageID,omitempty"` + FromRef *v1.ObjectReference `json:"fromRef,omitempty"` +} + +// ImageChangeCauseApplyConfiguration constructs an declarative configuration of the ImageChangeCause type for use with +// apply. +func ImageChangeCause() *ImageChangeCauseApplyConfiguration { + return &ImageChangeCauseApplyConfiguration{} +} + +// WithImageID sets the ImageID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ImageID field is set to the value of the last call. +func (b *ImageChangeCauseApplyConfiguration) WithImageID(value string) *ImageChangeCauseApplyConfiguration { + b.ImageID = &value + return b +} + +// WithFromRef sets the FromRef field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the FromRef field is set to the value of the last call. 
+func (b *ImageChangeCauseApplyConfiguration) WithFromRef(value v1.ObjectReference) *ImageChangeCauseApplyConfiguration { + b.FromRef = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagechangetrigger.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagechangetrigger.go new file mode 100644 index 000000000..d9ce5c44b --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagechangetrigger.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// ImageChangeTriggerApplyConfiguration represents an declarative configuration of the ImageChangeTrigger type for use +// with apply. +type ImageChangeTriggerApplyConfiguration struct { + LastTriggeredImageID *string `json:"lastTriggeredImageID,omitempty"` + From *v1.ObjectReference `json:"from,omitempty"` + Paused *bool `json:"paused,omitempty"` +} + +// ImageChangeTriggerApplyConfiguration constructs an declarative configuration of the ImageChangeTrigger type for use with +// apply. +func ImageChangeTrigger() *ImageChangeTriggerApplyConfiguration { + return &ImageChangeTriggerApplyConfiguration{} +} + +// WithLastTriggeredImageID sets the LastTriggeredImageID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastTriggeredImageID field is set to the value of the last call. +func (b *ImageChangeTriggerApplyConfiguration) WithLastTriggeredImageID(value string) *ImageChangeTriggerApplyConfiguration { + b.LastTriggeredImageID = &value + return b +} + +// WithFrom sets the From field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the From field is set to the value of the last call. +func (b *ImageChangeTriggerApplyConfiguration) WithFrom(value v1.ObjectReference) *ImageChangeTriggerApplyConfiguration { + b.From = &value + return b +} + +// WithPaused sets the Paused field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Paused field is set to the value of the last call. +func (b *ImageChangeTriggerApplyConfiguration) WithPaused(value bool) *ImageChangeTriggerApplyConfiguration { + b.Paused = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagechangetriggerstatus.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagechangetriggerstatus.go new file mode 100644 index 000000000..a5dc91280 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagechangetriggerstatus.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ImageChangeTriggerStatusApplyConfiguration represents an declarative configuration of the ImageChangeTriggerStatus type for use +// with apply. 
+type ImageChangeTriggerStatusApplyConfiguration struct { + LastTriggeredImageID *string `json:"lastTriggeredImageID,omitempty"` + From *ImageStreamTagReferenceApplyConfiguration `json:"from,omitempty"` + LastTriggerTime *metav1.Time `json:"lastTriggerTime,omitempty"` +} + +// ImageChangeTriggerStatusApplyConfiguration constructs an declarative configuration of the ImageChangeTriggerStatus type for use with +// apply. +func ImageChangeTriggerStatus() *ImageChangeTriggerStatusApplyConfiguration { + return &ImageChangeTriggerStatusApplyConfiguration{} +} + +// WithLastTriggeredImageID sets the LastTriggeredImageID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastTriggeredImageID field is set to the value of the last call. +func (b *ImageChangeTriggerStatusApplyConfiguration) WithLastTriggeredImageID(value string) *ImageChangeTriggerStatusApplyConfiguration { + b.LastTriggeredImageID = &value + return b +} + +// WithFrom sets the From field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the From field is set to the value of the last call. +func (b *ImageChangeTriggerStatusApplyConfiguration) WithFrom(value *ImageStreamTagReferenceApplyConfiguration) *ImageChangeTriggerStatusApplyConfiguration { + b.From = value + return b +} + +// WithLastTriggerTime sets the LastTriggerTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastTriggerTime field is set to the value of the last call. +func (b *ImageChangeTriggerStatusApplyConfiguration) WithLastTriggerTime(value metav1.Time) *ImageChangeTriggerStatusApplyConfiguration { + b.LastTriggerTime = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagelabel.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagelabel.go new file mode 100644 index 000000000..1199666c4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagelabel.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ImageLabelApplyConfiguration represents an declarative configuration of the ImageLabel type for use +// with apply. +type ImageLabelApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Value *string `json:"value,omitempty"` +} + +// ImageLabelApplyConfiguration constructs an declarative configuration of the ImageLabel type for use with +// apply. +func ImageLabel() *ImageLabelApplyConfiguration { + return &ImageLabelApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ImageLabelApplyConfiguration) WithName(value string) *ImageLabelApplyConfiguration { + b.Name = &value + return b +} + +// WithValue sets the Value field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the Value field is set to the value of the last call. +func (b *ImageLabelApplyConfiguration) WithValue(value string) *ImageLabelApplyConfiguration { + b.Value = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagesource.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagesource.go new file mode 100644 index 000000000..f233d321d --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagesource.go @@ -0,0 +1,61 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// ImageSourceApplyConfiguration represents an declarative configuration of the ImageSource type for use +// with apply. +type ImageSourceApplyConfiguration struct { + From *v1.ObjectReference `json:"from,omitempty"` + As []string `json:"as,omitempty"` + Paths []ImageSourcePathApplyConfiguration `json:"paths,omitempty"` + PullSecret *v1.LocalObjectReference `json:"pullSecret,omitempty"` +} + +// ImageSourceApplyConfiguration constructs an declarative configuration of the ImageSource type for use with +// apply. +func ImageSource() *ImageSourceApplyConfiguration { + return &ImageSourceApplyConfiguration{} +} + +// WithFrom sets the From field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the From field is set to the value of the last call. +func (b *ImageSourceApplyConfiguration) WithFrom(value v1.ObjectReference) *ImageSourceApplyConfiguration { + b.From = &value + return b +} + +// WithAs adds the given value to the As field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the As field. +func (b *ImageSourceApplyConfiguration) WithAs(values ...string) *ImageSourceApplyConfiguration { + for i := range values { + b.As = append(b.As, values[i]) + } + return b +} + +// WithPaths adds the given value to the Paths field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Paths field. +func (b *ImageSourceApplyConfiguration) WithPaths(values ...*ImageSourcePathApplyConfiguration) *ImageSourceApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithPaths") + } + b.Paths = append(b.Paths, *values[i]) + } + return b +} + +// WithPullSecret sets the PullSecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PullSecret field is set to the value of the last call. 
+func (b *ImageSourceApplyConfiguration) WithPullSecret(value v1.LocalObjectReference) *ImageSourceApplyConfiguration { + b.PullSecret = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagesourcepath.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagesourcepath.go new file mode 100644 index 000000000..a02a09649 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagesourcepath.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ImageSourcePathApplyConfiguration represents an declarative configuration of the ImageSourcePath type for use +// with apply. +type ImageSourcePathApplyConfiguration struct { + SourcePath *string `json:"sourcePath,omitempty"` + DestinationDir *string `json:"destinationDir,omitempty"` +} + +// ImageSourcePathApplyConfiguration constructs an declarative configuration of the ImageSourcePath type for use with +// apply. +func ImageSourcePath() *ImageSourcePathApplyConfiguration { + return &ImageSourcePathApplyConfiguration{} +} + +// WithSourcePath sets the SourcePath field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SourcePath field is set to the value of the last call. +func (b *ImageSourcePathApplyConfiguration) WithSourcePath(value string) *ImageSourcePathApplyConfiguration { + b.SourcePath = &value + return b +} + +// WithDestinationDir sets the DestinationDir field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DestinationDir field is set to the value of the last call. +func (b *ImageSourcePathApplyConfiguration) WithDestinationDir(value string) *ImageSourcePathApplyConfiguration { + b.DestinationDir = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagestreamtagreference.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagestreamtagreference.go new file mode 100644 index 000000000..f37673f1b --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/imagestreamtagreference.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ImageStreamTagReferenceApplyConfiguration represents an declarative configuration of the ImageStreamTagReference type for use +// with apply. +type ImageStreamTagReferenceApplyConfiguration struct { + Namespace *string `json:"namespace,omitempty"` + Name *string `json:"name,omitempty"` +} + +// ImageStreamTagReferenceApplyConfiguration constructs an declarative configuration of the ImageStreamTagReference type for use with +// apply. +func ImageStreamTagReference() *ImageStreamTagReferenceApplyConfiguration { + return &ImageStreamTagReferenceApplyConfiguration{} +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
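The slice setters that accept pointer apply configurations (WithPaths here, and WithVolumes, WithSecrets, WithSteps in the other files) dereference each element into the slice and deliberately panic on a nil entry rather than skipping it. A sketch, with a hypothetical image reference and copy path:

package example // illustrative sketch only

import (
	buildv1 "github.com/openshift/client-go/build/applyconfigurations/build/v1"
	corev1 "k8s.io/api/core/v1"
)

func imageSource() *buildv1.ImageSourceApplyConfiguration {
	return buildv1.ImageSource().
		WithFrom(corev1.ObjectReference{Kind: "ImageStreamTag", Name: "tools:latest"}). // hypothetical tag
		WithPaths(buildv1.ImageSourcePath().
			WithSourcePath("/usr/bin/helm"). // hypothetical binary to copy
			WithDestinationDir("bin"))
}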
+func (b *ImageStreamTagReferenceApplyConfiguration) WithNamespace(value string) *ImageStreamTagReferenceApplyConfiguration { + b.Namespace = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ImageStreamTagReferenceApplyConfiguration) WithName(value string) *ImageStreamTagReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/jenkinspipelinebuildstrategy.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/jenkinspipelinebuildstrategy.go new file mode 100644 index 000000000..29a0b6eeb --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/jenkinspipelinebuildstrategy.go @@ -0,0 +1,47 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// JenkinsPipelineBuildStrategyApplyConfiguration represents an declarative configuration of the JenkinsPipelineBuildStrategy type for use +// with apply. +type JenkinsPipelineBuildStrategyApplyConfiguration struct { + JenkinsfilePath *string `json:"jenkinsfilePath,omitempty"` + Jenkinsfile *string `json:"jenkinsfile,omitempty"` + Env []v1.EnvVar `json:"env,omitempty"` +} + +// JenkinsPipelineBuildStrategyApplyConfiguration constructs an declarative configuration of the JenkinsPipelineBuildStrategy type for use with +// apply. +func JenkinsPipelineBuildStrategy() *JenkinsPipelineBuildStrategyApplyConfiguration { + return &JenkinsPipelineBuildStrategyApplyConfiguration{} +} + +// WithJenkinsfilePath sets the JenkinsfilePath field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the JenkinsfilePath field is set to the value of the last call. +func (b *JenkinsPipelineBuildStrategyApplyConfiguration) WithJenkinsfilePath(value string) *JenkinsPipelineBuildStrategyApplyConfiguration { + b.JenkinsfilePath = &value + return b +} + +// WithJenkinsfile sets the Jenkinsfile field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Jenkinsfile field is set to the value of the last call. +func (b *JenkinsPipelineBuildStrategyApplyConfiguration) WithJenkinsfile(value string) *JenkinsPipelineBuildStrategyApplyConfiguration { + b.Jenkinsfile = &value + return b +} + +// WithEnv adds the given value to the Env field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Env field. 
+func (b *JenkinsPipelineBuildStrategyApplyConfiguration) WithEnv(values ...v1.EnvVar) *JenkinsPipelineBuildStrategyApplyConfiguration { + for i := range values { + b.Env = append(b.Env, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/proxyconfig.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/proxyconfig.go new file mode 100644 index 000000000..4e8f8bdb7 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/proxyconfig.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ProxyConfigApplyConfiguration represents an declarative configuration of the ProxyConfig type for use +// with apply. +type ProxyConfigApplyConfiguration struct { + HTTPProxy *string `json:"httpProxy,omitempty"` + HTTPSProxy *string `json:"httpsProxy,omitempty"` + NoProxy *string `json:"noProxy,omitempty"` +} + +// ProxyConfigApplyConfiguration constructs an declarative configuration of the ProxyConfig type for use with +// apply. +func ProxyConfig() *ProxyConfigApplyConfiguration { + return &ProxyConfigApplyConfiguration{} +} + +// WithHTTPProxy sets the HTTPProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTPProxy field is set to the value of the last call. +func (b *ProxyConfigApplyConfiguration) WithHTTPProxy(value string) *ProxyConfigApplyConfiguration { + b.HTTPProxy = &value + return b +} + +// WithHTTPSProxy sets the HTTPSProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the HTTPSProxy field is set to the value of the last call. +func (b *ProxyConfigApplyConfiguration) WithHTTPSProxy(value string) *ProxyConfigApplyConfiguration { + b.HTTPSProxy = &value + return b +} + +// WithNoProxy sets the NoProxy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NoProxy field is set to the value of the last call. +func (b *ProxyConfigApplyConfiguration) WithNoProxy(value string) *ProxyConfigApplyConfiguration { + b.NoProxy = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/secretbuildsource.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/secretbuildsource.go new file mode 100644 index 000000000..295920ab1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/secretbuildsource.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// SecretBuildSourceApplyConfiguration represents an declarative configuration of the SecretBuildSource type for use +// with apply. +type SecretBuildSourceApplyConfiguration struct { + Secret *v1.LocalObjectReference `json:"secret,omitempty"` + DestinationDir *string `json:"destinationDir,omitempty"` +} + +// SecretBuildSourceApplyConfiguration constructs an declarative configuration of the SecretBuildSource type for use with +// apply. 
+func SecretBuildSource() *SecretBuildSourceApplyConfiguration { + return &SecretBuildSourceApplyConfiguration{} +} + +// WithSecret sets the Secret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Secret field is set to the value of the last call. +func (b *SecretBuildSourceApplyConfiguration) WithSecret(value v1.LocalObjectReference) *SecretBuildSourceApplyConfiguration { + b.Secret = &value + return b +} + +// WithDestinationDir sets the DestinationDir field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DestinationDir field is set to the value of the last call. +func (b *SecretBuildSourceApplyConfiguration) WithDestinationDir(value string) *SecretBuildSourceApplyConfiguration { + b.DestinationDir = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/secretlocalreference.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/secretlocalreference.go new file mode 100644 index 000000000..4526f4454 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/secretlocalreference.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// SecretLocalReferenceApplyConfiguration represents an declarative configuration of the SecretLocalReference type for use +// with apply. +type SecretLocalReferenceApplyConfiguration struct { + Name *string `json:"name,omitempty"` +} + +// SecretLocalReferenceApplyConfiguration constructs an declarative configuration of the SecretLocalReference type for use with +// apply. +func SecretLocalReference() *SecretLocalReferenceApplyConfiguration { + return &SecretLocalReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *SecretLocalReferenceApplyConfiguration) WithName(value string) *SecretLocalReferenceApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/secretspec.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/secretspec.go new file mode 100644 index 000000000..2a10460c4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/secretspec.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// SecretSpecApplyConfiguration represents an declarative configuration of the SecretSpec type for use +// with apply. +type SecretSpecApplyConfiguration struct { + SecretSource *v1.LocalObjectReference `json:"secretSource,omitempty"` + MountPath *string `json:"mountPath,omitempty"` +} + +// SecretSpecApplyConfiguration constructs an declarative configuration of the SecretSpec type for use with +// apply. 
+func SecretSpec() *SecretSpecApplyConfiguration { + return &SecretSpecApplyConfiguration{} +} + +// WithSecretSource sets the SecretSource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SecretSource field is set to the value of the last call. +func (b *SecretSpecApplyConfiguration) WithSecretSource(value v1.LocalObjectReference) *SecretSpecApplyConfiguration { + b.SecretSource = &value + return b +} + +// WithMountPath sets the MountPath field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MountPath field is set to the value of the last call. +func (b *SecretSpecApplyConfiguration) WithMountPath(value string) *SecretSpecApplyConfiguration { + b.MountPath = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/sourcebuildstrategy.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/sourcebuildstrategy.go new file mode 100644 index 000000000..ece802184 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/sourcebuildstrategy.go @@ -0,0 +1,88 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// SourceBuildStrategyApplyConfiguration represents an declarative configuration of the SourceBuildStrategy type for use +// with apply. +type SourceBuildStrategyApplyConfiguration struct { + From *v1.ObjectReference `json:"from,omitempty"` + PullSecret *v1.LocalObjectReference `json:"pullSecret,omitempty"` + Env []v1.EnvVar `json:"env,omitempty"` + Scripts *string `json:"scripts,omitempty"` + Incremental *bool `json:"incremental,omitempty"` + ForcePull *bool `json:"forcePull,omitempty"` + Volumes []BuildVolumeApplyConfiguration `json:"volumes,omitempty"` +} + +// SourceBuildStrategyApplyConfiguration constructs an declarative configuration of the SourceBuildStrategy type for use with +// apply. +func SourceBuildStrategy() *SourceBuildStrategyApplyConfiguration { + return &SourceBuildStrategyApplyConfiguration{} +} + +// WithFrom sets the From field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the From field is set to the value of the last call. +func (b *SourceBuildStrategyApplyConfiguration) WithFrom(value v1.ObjectReference) *SourceBuildStrategyApplyConfiguration { + b.From = &value + return b +} + +// WithPullSecret sets the PullSecret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PullSecret field is set to the value of the last call. +func (b *SourceBuildStrategyApplyConfiguration) WithPullSecret(value v1.LocalObjectReference) *SourceBuildStrategyApplyConfiguration { + b.PullSecret = &value + return b +} + +// WithEnv adds the given value to the Env field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Env field. 
+func (b *SourceBuildStrategyApplyConfiguration) WithEnv(values ...v1.EnvVar) *SourceBuildStrategyApplyConfiguration { + for i := range values { + b.Env = append(b.Env, values[i]) + } + return b +} + +// WithScripts sets the Scripts field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Scripts field is set to the value of the last call. +func (b *SourceBuildStrategyApplyConfiguration) WithScripts(value string) *SourceBuildStrategyApplyConfiguration { + b.Scripts = &value + return b +} + +// WithIncremental sets the Incremental field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Incremental field is set to the value of the last call. +func (b *SourceBuildStrategyApplyConfiguration) WithIncremental(value bool) *SourceBuildStrategyApplyConfiguration { + b.Incremental = &value + return b +} + +// WithForcePull sets the ForcePull field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ForcePull field is set to the value of the last call. +func (b *SourceBuildStrategyApplyConfiguration) WithForcePull(value bool) *SourceBuildStrategyApplyConfiguration { + b.ForcePull = &value + return b +} + +// WithVolumes adds the given value to the Volumes field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Volumes field. +func (b *SourceBuildStrategyApplyConfiguration) WithVolumes(values ...*BuildVolumeApplyConfiguration) *SourceBuildStrategyApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithVolumes") + } + b.Volumes = append(b.Volumes, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/sourcecontroluser.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/sourcecontroluser.go new file mode 100644 index 000000000..466531fd7 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/sourcecontroluser.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// SourceControlUserApplyConfiguration represents an declarative configuration of the SourceControlUser type for use +// with apply. +type SourceControlUserApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Email *string `json:"email,omitempty"` +} + +// SourceControlUserApplyConfiguration constructs an declarative configuration of the SourceControlUser type for use with +// apply. +func SourceControlUser() *SourceControlUserApplyConfiguration { + return &SourceControlUserApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *SourceControlUserApplyConfiguration) WithName(value string) *SourceControlUserApplyConfiguration { + b.Name = &value + return b +} + +// WithEmail sets the Email field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Email field is set to the value of the last call. +func (b *SourceControlUserApplyConfiguration) WithEmail(value string) *SourceControlUserApplyConfiguration { + b.Email = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/sourcerevision.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/sourcerevision.go new file mode 100644 index 000000000..6433c1b12 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/sourcerevision.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/build/v1" +) + +// SourceRevisionApplyConfiguration represents an declarative configuration of the SourceRevision type for use +// with apply. +type SourceRevisionApplyConfiguration struct { + Type *v1.BuildSourceType `json:"type,omitempty"` + Git *GitSourceRevisionApplyConfiguration `json:"git,omitempty"` +} + +// SourceRevisionApplyConfiguration constructs an declarative configuration of the SourceRevision type for use with +// apply. +func SourceRevision() *SourceRevisionApplyConfiguration { + return &SourceRevisionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *SourceRevisionApplyConfiguration) WithType(value v1.BuildSourceType) *SourceRevisionApplyConfiguration { + b.Type = &value + return b +} + +// WithGit sets the Git field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Git field is set to the value of the last call. +func (b *SourceRevisionApplyConfiguration) WithGit(value *GitSourceRevisionApplyConfiguration) *SourceRevisionApplyConfiguration { + b.Git = value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/stageinfo.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/stageinfo.go new file mode 100644 index 000000000..4a9656df5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/stageinfo.go @@ -0,0 +1,60 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/build/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// StageInfoApplyConfiguration represents an declarative configuration of the StageInfo type for use +// with apply. +type StageInfoApplyConfiguration struct { + Name *v1.StageName `json:"name,omitempty"` + StartTime *metav1.Time `json:"startTime,omitempty"` + DurationMilliseconds *int64 `json:"durationMilliseconds,omitempty"` + Steps []StepInfoApplyConfiguration `json:"steps,omitempty"` +} + +// StageInfoApplyConfiguration constructs an declarative configuration of the StageInfo type for use with +// apply. 
+func StageInfo() *StageInfoApplyConfiguration { + return &StageInfoApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *StageInfoApplyConfiguration) WithName(value v1.StageName) *StageInfoApplyConfiguration { + b.Name = &value + return b +} + +// WithStartTime sets the StartTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the StartTime field is set to the value of the last call. +func (b *StageInfoApplyConfiguration) WithStartTime(value metav1.Time) *StageInfoApplyConfiguration { + b.StartTime = &value + return b +} + +// WithDurationMilliseconds sets the DurationMilliseconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DurationMilliseconds field is set to the value of the last call. +func (b *StageInfoApplyConfiguration) WithDurationMilliseconds(value int64) *StageInfoApplyConfiguration { + b.DurationMilliseconds = &value + return b +} + +// WithSteps adds the given value to the Steps field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Steps field. +func (b *StageInfoApplyConfiguration) WithSteps(values ...*StepInfoApplyConfiguration) *StageInfoApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSteps") + } + b.Steps = append(b.Steps, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/stepinfo.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/stepinfo.go new file mode 100644 index 000000000..df2895697 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/stepinfo.go @@ -0,0 +1,46 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/build/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// StepInfoApplyConfiguration represents an declarative configuration of the StepInfo type for use +// with apply. +type StepInfoApplyConfiguration struct { + Name *v1.StepName `json:"name,omitempty"` + StartTime *metav1.Time `json:"startTime,omitempty"` + DurationMilliseconds *int64 `json:"durationMilliseconds,omitempty"` +} + +// StepInfoApplyConfiguration constructs an declarative configuration of the StepInfo type for use with +// apply. +func StepInfo() *StepInfoApplyConfiguration { + return &StepInfoApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
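Nested apply configurations compose the same way: a StageInfo builder aggregates StepInfo builders through WithSteps. A sketch with invented stage and step names; the real v1.StageName and v1.StepName constants live in github.com/openshift/api/build/v1, and untyped string literals convert to those parameter types as shown:

package example // illustrative sketch only

import (
	buildv1 "github.com/openshift/client-go/build/applyconfigurations/build/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func stage() *buildv1.StageInfoApplyConfiguration {
	now := metav1.Now()
	return buildv1.StageInfo().
		WithName("Build"). // invented name; converts to the v1.StageName parameter type
		WithStartTime(now).
		WithDurationMilliseconds(1500).
		WithSteps(buildv1.StepInfo().
			WithName("PullImages"). // invented step name
			WithStartTime(now))
}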
+func (b *StepInfoApplyConfiguration) WithName(value v1.StepName) *StepInfoApplyConfiguration { + b.Name = &value + return b +} + +// WithStartTime sets the StartTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the StartTime field is set to the value of the last call. +func (b *StepInfoApplyConfiguration) WithStartTime(value metav1.Time) *StepInfoApplyConfiguration { + b.StartTime = &value + return b +} + +// WithDurationMilliseconds sets the DurationMilliseconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DurationMilliseconds field is set to the value of the last call. +func (b *StepInfoApplyConfiguration) WithDurationMilliseconds(value int64) *StepInfoApplyConfiguration { + b.DurationMilliseconds = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/webhooktrigger.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/webhooktrigger.go new file mode 100644 index 000000000..7ca0fd7b9 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/build/v1/webhooktrigger.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// WebHookTriggerApplyConfiguration represents an declarative configuration of the WebHookTrigger type for use +// with apply. +type WebHookTriggerApplyConfiguration struct { + Secret *string `json:"secret,omitempty"` + AllowEnv *bool `json:"allowEnv,omitempty"` + SecretReference *SecretLocalReferenceApplyConfiguration `json:"secretReference,omitempty"` +} + +// WebHookTriggerApplyConfiguration constructs an declarative configuration of the WebHookTrigger type for use with +// apply. +func WebHookTrigger() *WebHookTriggerApplyConfiguration { + return &WebHookTriggerApplyConfiguration{} +} + +// WithSecret sets the Secret field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Secret field is set to the value of the last call. +func (b *WebHookTriggerApplyConfiguration) WithSecret(value string) *WebHookTriggerApplyConfiguration { + b.Secret = &value + return b +} + +// WithAllowEnv sets the AllowEnv field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AllowEnv field is set to the value of the last call. +func (b *WebHookTriggerApplyConfiguration) WithAllowEnv(value bool) *WebHookTriggerApplyConfiguration { + b.AllowEnv = &value + return b +} + +// WithSecretReference sets the SecretReference field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SecretReference field is set to the value of the last call. 
+func (b *WebHookTriggerApplyConfiguration) WithSecretReference(value *SecretLocalReferenceApplyConfiguration) *WebHookTriggerApplyConfiguration { + b.SecretReference = value + return b +} diff --git a/vendor/github.com/openshift/client-go/build/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/build/applyconfigurations/internal/internal.go new file mode 100644 index 000000000..874b31cad --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/applyconfigurations/internal/internal.go @@ -0,0 +1,1195 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package internal + +import ( + "fmt" + "sync" + + typed "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +func Parser() *typed.Parser { + parserOnce.Do(func() { + var err error + parser, err = typed.NewParser(schemaYAML) + if err != nil { + panic(fmt.Sprintf("Failed to parse schema: %v", err)) + } + }) + return parser +} + +var parserOnce sync.Once +var parser *typed.Parser +var schemaYAML = typed.YAMLObject(`types: +- name: com.github.openshift.api.build.v1.BinaryBuildSource + map: + fields: + - name: asFile + type: + scalar: string +- name: com.github.openshift.api.build.v1.BitbucketWebHookCause + map: + fields: + - name: revision + type: + namedType: com.github.openshift.api.build.v1.SourceRevision + - name: secret + type: + scalar: string +- name: com.github.openshift.api.build.v1.Build + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.build.v1.BuildSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.build.v1.BuildStatus + default: {} +- name: com.github.openshift.api.build.v1.BuildCondition + map: + fields: + - name: lastTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} + - name: lastUpdateTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} + - name: message + type: + scalar: string + - name: reason + type: + scalar: string + - name: status + type: + scalar: string + default: "" + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.build.v1.BuildConfig + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.build.v1.BuildConfigSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.build.v1.BuildConfigStatus + default: {} +- name: com.github.openshift.api.build.v1.BuildConfigSpec + map: + fields: + - name: completionDeadlineSeconds + type: + scalar: numeric + - name: failedBuildsHistoryLimit + type: + scalar: numeric + - name: mountTrustedCA + type: + scalar: boolean + - name: nodeSelector + type: + map: + elementType: + scalar: string + - name: output + type: + namedType: com.github.openshift.api.build.v1.BuildOutput + default: {} + - name: postCommit + type: + namedType: com.github.openshift.api.build.v1.BuildPostCommitSpec + default: {} + - name: resources + type: + namedType: io.k8s.api.core.v1.ResourceRequirements + default: {} + - name: revision + type: + namedType: com.github.openshift.api.build.v1.SourceRevision + - name: runPolicy + type: + scalar: string + - name: serviceAccount + type: + scalar: 
string + - name: source + type: + namedType: com.github.openshift.api.build.v1.BuildSource + default: {} + - name: strategy + type: + namedType: com.github.openshift.api.build.v1.BuildStrategy + default: {} + - name: successfulBuildsHistoryLimit + type: + scalar: numeric + - name: triggers + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.BuildTriggerPolicy + elementRelationship: atomic +- name: com.github.openshift.api.build.v1.BuildConfigStatus + map: + fields: + - name: imageChangeTriggers + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.ImageChangeTriggerStatus + elementRelationship: atomic + - name: lastVersion + type: + scalar: numeric + default: 0 +- name: com.github.openshift.api.build.v1.BuildOutput + map: + fields: + - name: imageLabels + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.ImageLabel + elementRelationship: atomic + - name: pushSecret + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + - name: to + type: + namedType: io.k8s.api.core.v1.ObjectReference +- name: com.github.openshift.api.build.v1.BuildPostCommitSpec + map: + fields: + - name: args + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: command + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: script + type: + scalar: string +- name: com.github.openshift.api.build.v1.BuildSource + map: + fields: + - name: binary + type: + namedType: com.github.openshift.api.build.v1.BinaryBuildSource + - name: configMaps + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.ConfigMapBuildSource + elementRelationship: atomic + - name: contextDir + type: + scalar: string + - name: dockerfile + type: + scalar: string + - name: git + type: + namedType: com.github.openshift.api.build.v1.GitBuildSource + - name: images + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.ImageSource + elementRelationship: atomic + - name: secrets + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.SecretBuildSource + elementRelationship: atomic + - name: sourceSecret + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + - name: type + type: + scalar: string +- name: com.github.openshift.api.build.v1.BuildSpec + map: + fields: + - name: completionDeadlineSeconds + type: + scalar: numeric + - name: mountTrustedCA + type: + scalar: boolean + - name: nodeSelector + type: + map: + elementType: + scalar: string + - name: output + type: + namedType: com.github.openshift.api.build.v1.BuildOutput + default: {} + - name: postCommit + type: + namedType: com.github.openshift.api.build.v1.BuildPostCommitSpec + default: {} + - name: resources + type: + namedType: io.k8s.api.core.v1.ResourceRequirements + default: {} + - name: revision + type: + namedType: com.github.openshift.api.build.v1.SourceRevision + - name: serviceAccount + type: + scalar: string + - name: source + type: + namedType: com.github.openshift.api.build.v1.BuildSource + default: {} + - name: strategy + type: + namedType: com.github.openshift.api.build.v1.BuildStrategy + default: {} + - name: triggeredBy + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.BuildTriggerCause + elementRelationship: atomic +- name: com.github.openshift.api.build.v1.BuildStatus + map: + fields: + - name: cancelled + type: + scalar: boolean + - name: completionTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: 
conditions + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.BuildCondition + elementRelationship: associative + keys: + - type + - name: config + type: + namedType: io.k8s.api.core.v1.ObjectReference + - name: duration + type: + scalar: numeric + - name: logSnippet + type: + scalar: string + - name: message + type: + scalar: string + - name: output + type: + namedType: com.github.openshift.api.build.v1.BuildStatusOutput + default: {} + - name: outputDockerImageReference + type: + scalar: string + - name: phase + type: + scalar: string + default: "" + - name: reason + type: + scalar: string + - name: stages + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.StageInfo + elementRelationship: atomic + - name: startTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time +- name: com.github.openshift.api.build.v1.BuildStatusOutput + map: + fields: + - name: to + type: + namedType: com.github.openshift.api.build.v1.BuildStatusOutputTo +- name: com.github.openshift.api.build.v1.BuildStatusOutputTo + map: + fields: + - name: imageDigest + type: + scalar: string +- name: com.github.openshift.api.build.v1.BuildStrategy + map: + fields: + - name: customStrategy + type: + namedType: com.github.openshift.api.build.v1.CustomBuildStrategy + - name: dockerStrategy + type: + namedType: com.github.openshift.api.build.v1.DockerBuildStrategy + - name: jenkinsPipelineStrategy + type: + namedType: com.github.openshift.api.build.v1.JenkinsPipelineBuildStrategy + - name: sourceStrategy + type: + namedType: com.github.openshift.api.build.v1.SourceBuildStrategy + - name: type + type: + scalar: string +- name: com.github.openshift.api.build.v1.BuildTriggerCause + map: + fields: + - name: bitbucketWebHook + type: + namedType: com.github.openshift.api.build.v1.BitbucketWebHookCause + - name: genericWebHook + type: + namedType: com.github.openshift.api.build.v1.GenericWebHookCause + - name: githubWebHook + type: + namedType: com.github.openshift.api.build.v1.GitHubWebHookCause + - name: gitlabWebHook + type: + namedType: com.github.openshift.api.build.v1.GitLabWebHookCause + - name: imageChangeBuild + type: + namedType: com.github.openshift.api.build.v1.ImageChangeCause + - name: message + type: + scalar: string +- name: com.github.openshift.api.build.v1.BuildTriggerPolicy + map: + fields: + - name: bitbucket + type: + namedType: com.github.openshift.api.build.v1.WebHookTrigger + - name: generic + type: + namedType: com.github.openshift.api.build.v1.WebHookTrigger + - name: github + type: + namedType: com.github.openshift.api.build.v1.WebHookTrigger + - name: gitlab + type: + namedType: com.github.openshift.api.build.v1.WebHookTrigger + - name: imageChange + type: + namedType: com.github.openshift.api.build.v1.ImageChangeTrigger + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.build.v1.BuildVolume + map: + fields: + - name: mounts + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.BuildVolumeMount + elementRelationship: associative + keys: + - destinationPath + - name: name + type: + scalar: string + default: "" + - name: source + type: + namedType: com.github.openshift.api.build.v1.BuildVolumeSource + default: {} +- name: com.github.openshift.api.build.v1.BuildVolumeMount + map: + fields: + - name: destinationPath + type: + scalar: string + default: "" +- name: com.github.openshift.api.build.v1.BuildVolumeSource + map: + fields: + - name: configMap + type: + namedType: 
io.k8s.api.core.v1.ConfigMapVolumeSource + - name: csi + type: + namedType: io.k8s.api.core.v1.CSIVolumeSource + - name: secret + type: + namedType: io.k8s.api.core.v1.SecretVolumeSource + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.build.v1.ConfigMapBuildSource + map: + fields: + - name: configMap + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + default: {} + - name: destinationDir + type: + scalar: string +- name: com.github.openshift.api.build.v1.CustomBuildStrategy + map: + fields: + - name: buildAPIVersion + type: + scalar: string + - name: env + type: + list: + elementType: + namedType: io.k8s.api.core.v1.EnvVar + elementRelationship: atomic + - name: exposeDockerSocket + type: + scalar: boolean + - name: forcePull + type: + scalar: boolean + - name: from + type: + namedType: io.k8s.api.core.v1.ObjectReference + default: {} + - name: pullSecret + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + - name: secrets + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.SecretSpec + elementRelationship: atomic +- name: com.github.openshift.api.build.v1.DockerBuildStrategy + map: + fields: + - name: buildArgs + type: + list: + elementType: + namedType: io.k8s.api.core.v1.EnvVar + elementRelationship: atomic + - name: dockerfilePath + type: + scalar: string + - name: env + type: + list: + elementType: + namedType: io.k8s.api.core.v1.EnvVar + elementRelationship: atomic + - name: forcePull + type: + scalar: boolean + - name: from + type: + namedType: io.k8s.api.core.v1.ObjectReference + - name: imageOptimizationPolicy + type: + scalar: string + - name: noCache + type: + scalar: boolean + - name: pullSecret + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + - name: volumes + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.BuildVolume + elementRelationship: associative + keys: + - name +- name: com.github.openshift.api.build.v1.GenericWebHookCause + map: + fields: + - name: revision + type: + namedType: com.github.openshift.api.build.v1.SourceRevision + - name: secret + type: + scalar: string +- name: com.github.openshift.api.build.v1.GitBuildSource + map: + fields: + - name: httpProxy + type: + scalar: string + - name: httpsProxy + type: + scalar: string + - name: noProxy + type: + scalar: string + - name: ref + type: + scalar: string + - name: uri + type: + scalar: string + default: "" +- name: com.github.openshift.api.build.v1.GitHubWebHookCause + map: + fields: + - name: revision + type: + namedType: com.github.openshift.api.build.v1.SourceRevision + - name: secret + type: + scalar: string +- name: com.github.openshift.api.build.v1.GitLabWebHookCause + map: + fields: + - name: revision + type: + namedType: com.github.openshift.api.build.v1.SourceRevision + - name: secret + type: + scalar: string +- name: com.github.openshift.api.build.v1.GitSourceRevision + map: + fields: + - name: author + type: + namedType: com.github.openshift.api.build.v1.SourceControlUser + default: {} + - name: commit + type: + scalar: string + - name: committer + type: + namedType: com.github.openshift.api.build.v1.SourceControlUser + default: {} + - name: message + type: + scalar: string +- name: com.github.openshift.api.build.v1.ImageChangeCause + map: + fields: + - name: fromRef + type: + namedType: io.k8s.api.core.v1.ObjectReference + - name: imageID + type: + scalar: string +- name: com.github.openshift.api.build.v1.ImageChangeTrigger + map: + fields: + - name: from + type: + 
namedType: io.k8s.api.core.v1.ObjectReference + - name: lastTriggeredImageID + type: + scalar: string + - name: paused + type: + scalar: boolean +- name: com.github.openshift.api.build.v1.ImageChangeTriggerStatus + map: + fields: + - name: from + type: + namedType: com.github.openshift.api.build.v1.ImageStreamTagReference + default: {} + - name: lastTriggerTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} + - name: lastTriggeredImageID + type: + scalar: string +- name: com.github.openshift.api.build.v1.ImageLabel + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: value + type: + scalar: string +- name: com.github.openshift.api.build.v1.ImageSource + map: + fields: + - name: as + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: from + type: + namedType: io.k8s.api.core.v1.ObjectReference + default: {} + - name: paths + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.ImageSourcePath + elementRelationship: atomic + - name: pullSecret + type: + namedType: io.k8s.api.core.v1.LocalObjectReference +- name: com.github.openshift.api.build.v1.ImageSourcePath + map: + fields: + - name: destinationDir + type: + scalar: string + default: "" + - name: sourcePath + type: + scalar: string + default: "" +- name: com.github.openshift.api.build.v1.ImageStreamTagReference + map: + fields: + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string +- name: com.github.openshift.api.build.v1.JenkinsPipelineBuildStrategy + map: + fields: + - name: env + type: + list: + elementType: + namedType: io.k8s.api.core.v1.EnvVar + elementRelationship: atomic + - name: jenkinsfile + type: + scalar: string + - name: jenkinsfilePath + type: + scalar: string +- name: com.github.openshift.api.build.v1.SecretBuildSource + map: + fields: + - name: destinationDir + type: + scalar: string + - name: secret + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + default: {} +- name: com.github.openshift.api.build.v1.SecretLocalReference + map: + fields: + - name: name + type: + scalar: string + default: "" +- name: com.github.openshift.api.build.v1.SecretSpec + map: + fields: + - name: mountPath + type: + scalar: string + default: "" + - name: secretSource + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + default: {} +- name: com.github.openshift.api.build.v1.SourceBuildStrategy + map: + fields: + - name: env + type: + list: + elementType: + namedType: io.k8s.api.core.v1.EnvVar + elementRelationship: atomic + - name: forcePull + type: + scalar: boolean + - name: from + type: + namedType: io.k8s.api.core.v1.ObjectReference + default: {} + - name: incremental + type: + scalar: boolean + - name: pullSecret + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + - name: scripts + type: + scalar: string + - name: volumes + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.BuildVolume + elementRelationship: associative + keys: + - name +- name: com.github.openshift.api.build.v1.SourceControlUser + map: + fields: + - name: email + type: + scalar: string + - name: name + type: + scalar: string +- name: com.github.openshift.api.build.v1.SourceRevision + map: + fields: + - name: git + type: + namedType: com.github.openshift.api.build.v1.GitSourceRevision + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.build.v1.StageInfo + map: + fields: + - name: durationMilliseconds + type: + scalar: numeric + - 
name: name + type: + scalar: string + - name: startTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} + - name: steps + type: + list: + elementType: + namedType: com.github.openshift.api.build.v1.StepInfo + elementRelationship: atomic +- name: com.github.openshift.api.build.v1.StepInfo + map: + fields: + - name: durationMilliseconds + type: + scalar: numeric + - name: name + type: + scalar: string + - name: startTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} +- name: com.github.openshift.api.build.v1.WebHookTrigger + map: + fields: + - name: allowEnv + type: + scalar: boolean + - name: secret + type: + scalar: string + - name: secretReference + type: + namedType: com.github.openshift.api.build.v1.SecretLocalReference +- name: io.k8s.api.core.v1.CSIVolumeSource + map: + fields: + - name: driver + type: + scalar: string + default: "" + - name: fsType + type: + scalar: string + - name: nodePublishSecretRef + type: + namedType: io.k8s.api.core.v1.LocalObjectReference + - name: readOnly + type: + scalar: boolean + - name: volumeAttributes + type: + map: + elementType: + scalar: string +- name: io.k8s.api.core.v1.ConfigMapKeySelector + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: name + type: + scalar: string + - name: optional + type: + scalar: boolean + elementRelationship: atomic +- name: io.k8s.api.core.v1.ConfigMapVolumeSource + map: + fields: + - name: defaultMode + type: + scalar: numeric + - name: items + type: + list: + elementType: + namedType: io.k8s.api.core.v1.KeyToPath + elementRelationship: atomic + - name: name + type: + scalar: string + - name: optional + type: + scalar: boolean +- name: io.k8s.api.core.v1.EnvVar + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: value + type: + scalar: string + - name: valueFrom + type: + namedType: io.k8s.api.core.v1.EnvVarSource +- name: io.k8s.api.core.v1.EnvVarSource + map: + fields: + - name: configMapKeyRef + type: + namedType: io.k8s.api.core.v1.ConfigMapKeySelector + - name: fieldRef + type: + namedType: io.k8s.api.core.v1.ObjectFieldSelector + - name: resourceFieldRef + type: + namedType: io.k8s.api.core.v1.ResourceFieldSelector + - name: secretKeyRef + type: + namedType: io.k8s.api.core.v1.SecretKeySelector +- name: io.k8s.api.core.v1.KeyToPath + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: mode + type: + scalar: numeric + - name: path + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.LocalObjectReference + map: + fields: + - name: name + type: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.core.v1.ObjectFieldSelector + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldPath + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.api.core.v1.ObjectReference + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldPath + type: + scalar: string + - name: kind + type: + scalar: string + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: resourceVersion + type: + scalar: string + - name: uid + type: + scalar: string + elementRelationship: atomic +- name: io.k8s.api.core.v1.ResourceClaim + map: + fields: + - name: name + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.ResourceFieldSelector + map: + fields: + - name: containerName + type: + scalar: string + - name: divisor + type: + namedType: 
io.k8s.apimachinery.pkg.api.resource.Quantity + default: {} + - name: resource + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.api.core.v1.ResourceRequirements + map: + fields: + - name: claims + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ResourceClaim + elementRelationship: associative + keys: + - name + - name: limits + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity + - name: requests + type: + map: + elementType: + namedType: io.k8s.apimachinery.pkg.api.resource.Quantity +- name: io.k8s.api.core.v1.SecretKeySelector + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: name + type: + scalar: string + - name: optional + type: + scalar: boolean + elementRelationship: atomic +- name: io.k8s.api.core.v1.SecretVolumeSource + map: + fields: + - name: defaultMode + type: + scalar: numeric + - name: items + type: + list: + elementType: + namedType: io.k8s.api.core.v1.KeyToPath + elementRelationship: atomic + - name: optional + type: + scalar: boolean + - name: secretName + type: + scalar: string +- name: io.k8s.apimachinery.pkg.api.resource.Quantity + scalar: untyped +- name: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldsType + type: + scalar: string + - name: fieldsV1 + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + - name: manager + type: + scalar: string + - name: operation + type: + scalar: string + - name: subresource + type: + scalar: string + - name: time + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + map: + fields: + - name: annotations + type: + map: + elementType: + scalar: string + - name: creationTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} + - name: deletionGracePeriodSeconds + type: + scalar: numeric + - name: deletionTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: finalizers + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: generateName + type: + scalar: string + - name: generation + type: + scalar: numeric + - name: labels + type: + map: + elementType: + scalar: string + - name: managedFields + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + elementRelationship: atomic + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: ownerReferences + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + elementRelationship: associative + keys: + - uid + - name: resourceVersion + type: + scalar: string + - name: selfLink + type: + scalar: string + - name: uid + type: + scalar: string +- name: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + map: + fields: + - name: apiVersion + type: + scalar: string + default: "" + - name: blockOwnerDeletion + type: + scalar: boolean + - name: controller + type: + scalar: boolean + - name: kind + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: uid + type: + scalar: string + default: "" + 
elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.Time + scalar: untyped +- name: __untyped_atomic_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic +- name: __untyped_deduced_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +`) diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/clientset.go new file mode 100644 index 000000000..d7c9c6980 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/clientset.go @@ -0,0 +1,105 @@ +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + "fmt" + "net/http" + + buildv1 "github.com/openshift/client-go/build/clientset/versioned/typed/build/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + BuildV1() buildv1.BuildV1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + buildV1 *buildv1.BuildV1Client +} + +// BuildV1 retrieves the BuildV1Client +func (c *Clientset) BuildV1() buildv1.BuildV1Interface { + return c.buildV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. 
+func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.buildV1, err = buildv1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.buildV1 = buildv1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/doc.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/doc.go new file mode 100644 index 000000000..0e0c2a890 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package versioned diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/scheme/doc.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/scheme/doc.go new file mode 100644 index 000000000..14db57a58 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/scheme/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/scheme/register.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/scheme/register.go new file mode 100644 index 000000000..faa53af8f --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/scheme/register.go @@ -0,0 +1,40 @@ +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + buildv1 "github.com/openshift/api/build/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + buildv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. 
This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/build.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/build.go new file mode 100644 index 000000000..22cc0e7da --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/build.go @@ -0,0 +1,273 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1 "github.com/openshift/api/build/v1" + buildv1 "github.com/openshift/client-go/build/applyconfigurations/build/v1" + scheme "github.com/openshift/client-go/build/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// BuildsGetter has a method to return a BuildInterface. +// A group's client should implement this interface. +type BuildsGetter interface { + Builds(namespace string) BuildInterface +} + +// BuildInterface has methods to work with Build resources. +type BuildInterface interface { + Create(ctx context.Context, build *v1.Build, opts metav1.CreateOptions) (*v1.Build, error) + Update(ctx context.Context, build *v1.Build, opts metav1.UpdateOptions) (*v1.Build, error) + UpdateStatus(ctx context.Context, build *v1.Build, opts metav1.UpdateOptions) (*v1.Build, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Build, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.BuildList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Build, err error) + Apply(ctx context.Context, build *buildv1.BuildApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Build, err error) + ApplyStatus(ctx context.Context, build *buildv1.BuildApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Build, err error) + UpdateDetails(ctx context.Context, buildName string, build *v1.Build, opts metav1.UpdateOptions) (*v1.Build, error) + Clone(ctx context.Context, buildName string, buildRequest *v1.BuildRequest, opts metav1.CreateOptions) (*v1.Build, error) + + BuildExpansion +} + +// builds implements BuildInterface +type builds struct { + client rest.Interface + ns string +} + +// newBuilds returns a Builds +func newBuilds(c *BuildV1Client, namespace string) *builds { + return &builds{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the build, and returns the corresponding build object, and an error if there is 
any. +func (c *builds) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Build, err error) { + result = &v1.Build{} + err = c.client.Get(). + Namespace(c.ns). + Resource("builds"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Builds that match those selectors. +func (c *builds) List(ctx context.Context, opts metav1.ListOptions) (result *v1.BuildList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.BuildList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("builds"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested builds. +func (c *builds) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("builds"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a build and creates it. Returns the server's representation of the build, and an error, if there is any. +func (c *builds) Create(ctx context.Context, build *v1.Build, opts metav1.CreateOptions) (result *v1.Build, err error) { + result = &v1.Build{} + err = c.client.Post(). + Namespace(c.ns). + Resource("builds"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(build). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a build and updates it. Returns the server's representation of the build, and an error, if there is any. +func (c *builds) Update(ctx context.Context, build *v1.Build, opts metav1.UpdateOptions) (result *v1.Build, err error) { + result = &v1.Build{} + err = c.client.Put(). + Namespace(c.ns). + Resource("builds"). + Name(build.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(build). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *builds) UpdateStatus(ctx context.Context, build *v1.Build, opts metav1.UpdateOptions) (result *v1.Build, err error) { + result = &v1.Build{} + err = c.client.Put(). + Namespace(c.ns). + Resource("builds"). + Name(build.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(build). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the build and deletes it. Returns an error if one occurs. +func (c *builds) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("builds"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *builds) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("builds"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). 
+ Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched build. +func (c *builds) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Build, err error) { + result = &v1.Build{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("builds"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied build. +func (c *builds) Apply(ctx context.Context, build *buildv1.BuildApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Build, err error) { + if build == nil { + return nil, fmt.Errorf("build provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(build) + if err != nil { + return nil, err + } + name := build.Name + if name == nil { + return nil, fmt.Errorf("build.Name must be provided to Apply") + } + result = &v1.Build{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("builds"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *builds) ApplyStatus(ctx context.Context, build *buildv1.BuildApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Build, err error) { + if build == nil { + return nil, fmt.Errorf("build provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(build) + if err != nil { + return nil, err + } + + name := build.Name + if name == nil { + return nil, fmt.Errorf("build.Name must be provided to Apply") + } + + result = &v1.Build{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("builds"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// UpdateDetails takes the top resource name and the representation of a build and updates it. Returns the server's representation of the build, and an error, if there is any. +func (c *builds) UpdateDetails(ctx context.Context, buildName string, build *v1.Build, opts metav1.UpdateOptions) (result *v1.Build, err error) { + result = &v1.Build{} + err = c.client.Put(). + Namespace(c.ns). + Resource("builds"). + Name(buildName). + SubResource("details"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(build). + Do(ctx). + Into(result) + return +} + +// Clone takes the representation of a buildRequest and creates it. Returns the server's representation of the build, and an error, if there is any. +func (c *builds) Clone(ctx context.Context, buildName string, buildRequest *v1.BuildRequest, opts metav1.CreateOptions) (result *v1.Build, err error) { + result = &v1.Build{} + err = c.client.Post(). + Namespace(c.ns). + Resource("builds"). + Name(buildName). + SubResource("clone"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(buildRequest). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/build_client.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/build_client.go new file mode 100644 index 000000000..dcd0a5549 --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/build_client.go @@ -0,0 +1,96 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "net/http" + + v1 "github.com/openshift/api/build/v1" + "github.com/openshift/client-go/build/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type BuildV1Interface interface { + RESTClient() rest.Interface + BuildsGetter + BuildConfigsGetter +} + +// BuildV1Client is used to interact with features provided by the build.openshift.io group. +type BuildV1Client struct { + restClient rest.Interface +} + +func (c *BuildV1Client) Builds(namespace string) BuildInterface { + return newBuilds(c, namespace) +} + +func (c *BuildV1Client) BuildConfigs(namespace string) BuildConfigInterface { + return newBuildConfigs(c, namespace) +} + +// NewForConfig creates a new BuildV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*BuildV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new BuildV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*BuildV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &BuildV1Client{client}, nil +} + +// NewForConfigOrDie creates a new BuildV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *BuildV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new BuildV1Client for the given RESTClient. +func New(c rest.Interface) *BuildV1Client { + return &BuildV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *BuildV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/buildconfig.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/buildconfig.go new file mode 100644 index 000000000..488d5a10e --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/buildconfig.go @@ -0,0 +1,257 @@ +// Code generated by client-gen. DO NOT EDIT. 
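[Reviewer note, not part of the vendored code: the Clientset in clientset.go and the typed BuildV1Client above compose in the standard client-gen pattern. A minimal usage sketch follows; the kubeconfig path and the "default" namespace are illustrative placeholders, not anything this patch configures.

package main

import (
	"context"
	"fmt"

	buildclient "github.com/openshift/client-go/build/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a rest.Config from a kubeconfig; the path is a placeholder.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}

	// NewForConfig shares one HTTP transport across the typed clients and
	// applies the rate-limiter defaulting described in the comments above.
	cs, err := buildclient.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Typed, namespaced access: list Builds in the chosen namespace.
	builds, err := cs.BuildV1().Builds("default").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, b := range builds.Items {
		fmt.Printf("%s\t%s\n", b.Name, b.Status.Phase)
	}
}
]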
+ +package v1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1 "github.com/openshift/api/build/v1" + buildv1 "github.com/openshift/client-go/build/applyconfigurations/build/v1" + scheme "github.com/openshift/client-go/build/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// BuildConfigsGetter has a method to return a BuildConfigInterface. +// A group's client should implement this interface. +type BuildConfigsGetter interface { + BuildConfigs(namespace string) BuildConfigInterface +} + +// BuildConfigInterface has methods to work with BuildConfig resources. +type BuildConfigInterface interface { + Create(ctx context.Context, buildConfig *v1.BuildConfig, opts metav1.CreateOptions) (*v1.BuildConfig, error) + Update(ctx context.Context, buildConfig *v1.BuildConfig, opts metav1.UpdateOptions) (*v1.BuildConfig, error) + UpdateStatus(ctx context.Context, buildConfig *v1.BuildConfig, opts metav1.UpdateOptions) (*v1.BuildConfig, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.BuildConfig, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.BuildConfigList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.BuildConfig, err error) + Apply(ctx context.Context, buildConfig *buildv1.BuildConfigApplyConfiguration, opts metav1.ApplyOptions) (result *v1.BuildConfig, err error) + ApplyStatus(ctx context.Context, buildConfig *buildv1.BuildConfigApplyConfiguration, opts metav1.ApplyOptions) (result *v1.BuildConfig, err error) + Instantiate(ctx context.Context, buildConfigName string, buildRequest *v1.BuildRequest, opts metav1.CreateOptions) (*v1.Build, error) + + BuildConfigExpansion +} + +// buildConfigs implements BuildConfigInterface +type buildConfigs struct { + client rest.Interface + ns string +} + +// newBuildConfigs returns a BuildConfigs +func newBuildConfigs(c *BuildV1Client, namespace string) *buildConfigs { + return &buildConfigs{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the buildConfig, and returns the corresponding buildConfig object, and an error if there is any. +func (c *buildConfigs) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.BuildConfig, err error) { + result = &v1.BuildConfig{} + err = c.client.Get(). + Namespace(c.ns). + Resource("buildconfigs"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of BuildConfigs that match those selectors. +func (c *buildConfigs) List(ctx context.Context, opts metav1.ListOptions) (result *v1.BuildConfigList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.BuildConfigList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("buildconfigs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested buildConfigs. +func (c *buildConfigs) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("buildconfigs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a buildConfig and creates it. Returns the server's representation of the buildConfig, and an error, if there is any. +func (c *buildConfigs) Create(ctx context.Context, buildConfig *v1.BuildConfig, opts metav1.CreateOptions) (result *v1.BuildConfig, err error) { + result = &v1.BuildConfig{} + err = c.client.Post(). + Namespace(c.ns). + Resource("buildconfigs"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(buildConfig). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a buildConfig and updates it. Returns the server's representation of the buildConfig, and an error, if there is any. +func (c *buildConfigs) Update(ctx context.Context, buildConfig *v1.BuildConfig, opts metav1.UpdateOptions) (result *v1.BuildConfig, err error) { + result = &v1.BuildConfig{} + err = c.client.Put(). + Namespace(c.ns). + Resource("buildconfigs"). + Name(buildConfig.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(buildConfig). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *buildConfigs) UpdateStatus(ctx context.Context, buildConfig *v1.BuildConfig, opts metav1.UpdateOptions) (result *v1.BuildConfig, err error) { + result = &v1.BuildConfig{} + err = c.client.Put(). + Namespace(c.ns). + Resource("buildconfigs"). + Name(buildConfig.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(buildConfig). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the buildConfig and deletes it. Returns an error if one occurs. +func (c *buildConfigs) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("buildconfigs"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *buildConfigs) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("buildconfigs"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched buildConfig. +func (c *buildConfigs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.BuildConfig, err error) { + result = &v1.BuildConfig{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("buildconfigs"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied buildConfig. +func (c *buildConfigs) Apply(ctx context.Context, buildConfig *buildv1.BuildConfigApplyConfiguration, opts metav1.ApplyOptions) (result *v1.BuildConfig, err error) { + if buildConfig == nil { + return nil, fmt.Errorf("buildConfig provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(buildConfig) + if err != nil { + return nil, err + } + name := buildConfig.Name + if name == nil { + return nil, fmt.Errorf("buildConfig.Name must be provided to Apply") + } + result = &v1.BuildConfig{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("buildconfigs"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *buildConfigs) ApplyStatus(ctx context.Context, buildConfig *buildv1.BuildConfigApplyConfiguration, opts metav1.ApplyOptions) (result *v1.BuildConfig, err error) { + if buildConfig == nil { + return nil, fmt.Errorf("buildConfig provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(buildConfig) + if err != nil { + return nil, err + } + + name := buildConfig.Name + if name == nil { + return nil, fmt.Errorf("buildConfig.Name must be provided to Apply") + } + + result = &v1.BuildConfig{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("buildconfigs"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Instantiate takes the representation of a buildRequest and creates it. Returns the server's representation of the build, and an error, if there is any. +func (c *buildConfigs) Instantiate(ctx context.Context, buildConfigName string, buildRequest *v1.BuildRequest, opts metav1.CreateOptions) (result *v1.Build, err error) { + result = &v1.Build{} + err = c.client.Post(). + Namespace(c.ns). + Resource("buildconfigs"). + Name(buildConfigName). + SubResource("instantiate"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(buildRequest). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/doc.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/doc.go new file mode 100644 index 000000000..225e6b2be --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/generated_expansion.go new file mode 100644 index 000000000..d2a1f885c --- /dev/null +++ b/vendor/github.com/openshift/client-go/build/clientset/versioned/typed/build/v1/generated_expansion.go @@ -0,0 +1,7 @@ +// Code generated by client-gen. DO NOT EDIT. 
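[Reviewer note, not part of the vendored code: the Apply and ApplyStatus methods above implement Server-Side Apply — they marshal the apply configuration and send it as an ApplyPatchType patch, which is why a nil Name is rejected. A minimal sketch of driving them, assuming the generated BuildConfig(name, namespace) constructor and WithLabels helper from the applyconfigurations package (standard applyconfiguration-gen output) and illustrative names throughout:

package main

import (
	"context"

	buildv1 "github.com/openshift/client-go/build/applyconfigurations/build/v1"
	versioned "github.com/openshift/client-go/build/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// applyBuildConfig declares the desired state of a BuildConfig and lets the
// API server merge it; only the fields set here are owned by this manager.
func applyBuildConfig(ctx context.Context, cs versioned.Interface) error {
	// The constructor pre-populates name, namespace, kind, and apiVersion,
	// which satisfies the non-nil Name check in Apply above.
	bc := buildv1.BuildConfig("my-app", "default").
		WithLabels(map[string]string{"team": "ci"}) // illustrative label

	_, err := cs.BuildV1().BuildConfigs("default").Apply(ctx, bc,
		metav1.ApplyOptions{FieldManager: "example-manager", Force: true})
	return err
}

Force: true takes ownership of conflicting fields from other field managers; a real controller would pick a stable FieldManager name and choose its conflict policy deliberately.]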
+ +package v1 + +type BuildExpansion interface{} + +type BuildConfigExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsdnsspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsdnsspec.go new file mode 100644 index 000000000..4f7ce43d1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/awsdnsspec.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// AWSDNSSpecApplyConfiguration represents an declarative configuration of the AWSDNSSpec type for use +// with apply. +type AWSDNSSpecApplyConfiguration struct { + PrivateZoneIAMRole *string `json:"privateZoneIAMRole,omitempty"` +} + +// AWSDNSSpecApplyConfiguration constructs an declarative configuration of the AWSDNSSpec type for use with +// apply. +func AWSDNSSpec() *AWSDNSSpecApplyConfiguration { + return &AWSDNSSpecApplyConfiguration{} +} + +// WithPrivateZoneIAMRole sets the PrivateZoneIAMRole field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PrivateZoneIAMRole field is set to the value of the last call. +func (b *AWSDNSSpecApplyConfiguration) WithPrivateZoneIAMRole(value string) *AWSDNSSpecApplyConfiguration { + b.PrivateZoneIAMRole = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureplatformstatus.go index 93b780688..52b291553 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureplatformstatus.go @@ -9,10 +9,11 @@ import ( // AzurePlatformStatusApplyConfiguration represents an declarative configuration of the AzurePlatformStatus type for use // with apply. type AzurePlatformStatusApplyConfiguration struct { - ResourceGroupName *string `json:"resourceGroupName,omitempty"` - NetworkResourceGroupName *string `json:"networkResourceGroupName,omitempty"` - CloudName *v1.AzureCloudEnvironment `json:"cloudName,omitempty"` - ARMEndpoint *string `json:"armEndpoint,omitempty"` + ResourceGroupName *string `json:"resourceGroupName,omitempty"` + NetworkResourceGroupName *string `json:"networkResourceGroupName,omitempty"` + CloudName *v1.AzureCloudEnvironment `json:"cloudName,omitempty"` + ARMEndpoint *string `json:"armEndpoint,omitempty"` + ResourceTags []AzureResourceTagApplyConfiguration `json:"resourceTags,omitempty"` } // AzurePlatformStatusApplyConfiguration constructs an declarative configuration of the AzurePlatformStatus type for use with @@ -52,3 +53,16 @@ func (b *AzurePlatformStatusApplyConfiguration) WithARMEndpoint(value string) *A b.ARMEndpoint = &value return b } + +// WithResourceTags adds the given value to the ResourceTags field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ResourceTags field. 
+func (b *AzurePlatformStatusApplyConfiguration) WithResourceTags(values ...*AzureResourceTagApplyConfiguration) *AzurePlatformStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithResourceTags") + } + b.ResourceTags = append(b.ResourceTags, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureresourcetag.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureresourcetag.go new file mode 100644 index 000000000..f258f0987 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/azureresourcetag.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// AzureResourceTagApplyConfiguration represents an declarative configuration of the AzureResourceTag type for use +// with apply. +type AzureResourceTagApplyConfiguration struct { + Key *string `json:"key,omitempty"` + Value *string `json:"value,omitempty"` +} + +// AzureResourceTagApplyConfiguration constructs an declarative configuration of the AzureResourceTag type for use with +// apply. +func AzureResourceTag() *AzureResourceTagApplyConfiguration { + return &AzureResourceTagApplyConfiguration{} +} + +// WithKey sets the Key field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Key field is set to the value of the last call. +func (b *AzureResourceTagApplyConfiguration) WithKey(value string) *AzureResourceTagApplyConfiguration { + b.Key = &value + return b +} + +// WithValue sets the Value field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Value field is set to the value of the last call. +func (b *AzureResourceTagApplyConfiguration) WithValue(value string) *AzureResourceTagApplyConfiguration { + b.Value = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformloadbalancer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformloadbalancer.go new file mode 100644 index 000000000..7ff5dd99e --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformloadbalancer.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/config/v1" +) + +// BareMetalPlatformLoadBalancerApplyConfiguration represents an declarative configuration of the BareMetalPlatformLoadBalancer type for use +// with apply. +type BareMetalPlatformLoadBalancerApplyConfiguration struct { + Type *v1.PlatformLoadBalancerType `json:"type,omitempty"` +} + +// BareMetalPlatformLoadBalancerApplyConfiguration constructs an declarative configuration of the BareMetalPlatformLoadBalancer type for use with +// apply. +func BareMetalPlatformLoadBalancer() *BareMetalPlatformLoadBalancerApplyConfiguration { + return &BareMetalPlatformLoadBalancerApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. 
+func (b *BareMetalPlatformLoadBalancerApplyConfiguration) WithType(value v1.PlatformLoadBalancerType) *BareMetalPlatformLoadBalancerApplyConfiguration { + b.Type = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformstatus.go index 5e456d7aa..0c4ba2427 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/baremetalplatformstatus.go @@ -5,11 +5,12 @@ package v1 // BareMetalPlatformStatusApplyConfiguration represents an declarative configuration of the BareMetalPlatformStatus type for use // with apply. type BareMetalPlatformStatusApplyConfiguration struct { - APIServerInternalIP *string `json:"apiServerInternalIP,omitempty"` - APIServerInternalIPs []string `json:"apiServerInternalIPs,omitempty"` - IngressIP *string `json:"ingressIP,omitempty"` - IngressIPs []string `json:"ingressIPs,omitempty"` - NodeDNSIP *string `json:"nodeDNSIP,omitempty"` + APIServerInternalIP *string `json:"apiServerInternalIP,omitempty"` + APIServerInternalIPs []string `json:"apiServerInternalIPs,omitempty"` + IngressIP *string `json:"ingressIP,omitempty"` + IngressIPs []string `json:"ingressIPs,omitempty"` + NodeDNSIP *string `json:"nodeDNSIP,omitempty"` + LoadBalancer *BareMetalPlatformLoadBalancerApplyConfiguration `json:"loadBalancer,omitempty"` } // BareMetalPlatformStatusApplyConfiguration constructs an declarative configuration of the BareMetalPlatformStatus type for use with @@ -61,3 +62,11 @@ func (b *BareMetalPlatformStatusApplyConfiguration) WithNodeDNSIP(value string) b.NodeDNSIP = &value return b } + +// WithLoadBalancer sets the LoadBalancer field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LoadBalancer field is set to the value of the last call. +func (b *BareMetalPlatformStatusApplyConfiguration) WithLoadBalancer(value *BareMetalPlatformLoadBalancerApplyConfiguration) *BareMetalPlatformStatusApplyConfiguration { + b.LoadBalancer = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customfeaturegates.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customfeaturegates.go index d9fe99895..0ce419b28 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customfeaturegates.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/customfeaturegates.go @@ -2,11 +2,15 @@ package v1 +import ( + v1 "github.com/openshift/api/config/v1" +) + // CustomFeatureGatesApplyConfiguration represents an declarative configuration of the CustomFeatureGates type for use // with apply. 
type CustomFeatureGatesApplyConfiguration struct { - Enabled []string `json:"enabled,omitempty"` - Disabled []string `json:"disabled,omitempty"` + Enabled []v1.FeatureGateName `json:"enabled,omitempty"` + Disabled []v1.FeatureGateName `json:"disabled,omitempty"` } // CustomFeatureGatesApplyConfiguration constructs an declarative configuration of the CustomFeatureGates type for use with @@ -18,7 +22,7 @@ func CustomFeatureGates() *CustomFeatureGatesApplyConfiguration { // WithEnabled adds the given value to the Enabled field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Enabled field. -func (b *CustomFeatureGatesApplyConfiguration) WithEnabled(values ...string) *CustomFeatureGatesApplyConfiguration { +func (b *CustomFeatureGatesApplyConfiguration) WithEnabled(values ...v1.FeatureGateName) *CustomFeatureGatesApplyConfiguration { for i := range values { b.Enabled = append(b.Enabled, values[i]) } @@ -28,7 +32,7 @@ func (b *CustomFeatureGatesApplyConfiguration) WithEnabled(values ...string) *Cu // WithDisabled adds the given value to the Disabled field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the Disabled field. -func (b *CustomFeatureGatesApplyConfiguration) WithDisabled(values ...string) *CustomFeatureGatesApplyConfiguration { +func (b *CustomFeatureGatesApplyConfiguration) WithDisabled(values ...v1.FeatureGateName) *CustomFeatureGatesApplyConfiguration { for i := range values { b.Disabled = append(b.Disabled, values[i]) } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsplatformspec.go new file mode 100644 index 000000000..8f43c8c5f --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsplatformspec.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/config/v1" +) + +// DNSPlatformSpecApplyConfiguration represents an declarative configuration of the DNSPlatformSpec type for use +// with apply. +type DNSPlatformSpecApplyConfiguration struct { + Type *v1.PlatformType `json:"type,omitempty"` + AWS *AWSDNSSpecApplyConfiguration `json:"aws,omitempty"` +} + +// DNSPlatformSpecApplyConfiguration constructs an declarative configuration of the DNSPlatformSpec type for use with +// apply. +func DNSPlatformSpec() *DNSPlatformSpecApplyConfiguration { + return &DNSPlatformSpecApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *DNSPlatformSpecApplyConfiguration) WithType(value v1.PlatformType) *DNSPlatformSpecApplyConfiguration { + b.Type = &value + return b +} + +// WithAWS sets the AWS field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AWS field is set to the value of the last call. 
+func (b *DNSPlatformSpecApplyConfiguration) WithAWS(value *AWSDNSSpecApplyConfiguration) *DNSPlatformSpecApplyConfiguration { + b.AWS = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsspec.go index cfa268744..b534ef943 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/dnsspec.go @@ -5,9 +5,10 @@ package v1 // DNSSpecApplyConfiguration represents an declarative configuration of the DNSSpec type for use // with apply. type DNSSpecApplyConfiguration struct { - BaseDomain *string `json:"baseDomain,omitempty"` - PublicZone *DNSZoneApplyConfiguration `json:"publicZone,omitempty"` - PrivateZone *DNSZoneApplyConfiguration `json:"privateZone,omitempty"` + BaseDomain *string `json:"baseDomain,omitempty"` + PublicZone *DNSZoneApplyConfiguration `json:"publicZone,omitempty"` + PrivateZone *DNSZoneApplyConfiguration `json:"privateZone,omitempty"` + Platform *DNSPlatformSpecApplyConfiguration `json:"platform,omitempty"` } // DNSSpecApplyConfiguration constructs an declarative configuration of the DNSSpec type for use with @@ -39,3 +40,11 @@ func (b *DNSSpecApplyConfiguration) WithPrivateZone(value *DNSZoneApplyConfigura b.PrivateZone = value return b } + +// WithPlatform sets the Platform field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Platform field is set to the value of the last call. +func (b *DNSSpecApplyConfiguration) WithPlatform(value *DNSPlatformSpecApplyConfiguration) *DNSSpecApplyConfiguration { + b.Platform = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalplatformspec.go new file mode 100644 index 000000000..e9d5ccae5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/externalplatformspec.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ExternalPlatformSpecApplyConfiguration represents an declarative configuration of the ExternalPlatformSpec type for use +// with apply. +type ExternalPlatformSpecApplyConfiguration struct { + PlatformName *string `json:"platformName,omitempty"` +} + +// ExternalPlatformSpecApplyConfiguration constructs an declarative configuration of the ExternalPlatformSpec type for use with +// apply. +func ExternalPlatformSpec() *ExternalPlatformSpecApplyConfiguration { + return &ExternalPlatformSpecApplyConfiguration{} +} + +// WithPlatformName sets the PlatformName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PlatformName field is set to the value of the last call. 
+func (b *ExternalPlatformSpecApplyConfiguration) WithPlatformName(value string) *ExternalPlatformSpecApplyConfiguration { + b.PlatformName = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregate.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregate.go index 99534a96a..4ba3ab9c5 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregate.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregate.go @@ -16,8 +16,8 @@ import ( type FeatureGateApplyConfiguration struct { v1.TypeMetaApplyConfiguration `json:",inline"` *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *FeatureGateSpecApplyConfiguration `json:"spec,omitempty"` - Status *apiconfigv1.FeatureGateStatus `json:"status,omitempty"` + Spec *FeatureGateSpecApplyConfiguration `json:"spec,omitempty"` + Status *FeatureGateStatusApplyConfiguration `json:"status,omitempty"` } // FeatureGate constructs an declarative configuration of the FeatureGate type for use with @@ -234,7 +234,7 @@ func (b *FeatureGateApplyConfiguration) WithSpec(value *FeatureGateSpecApplyConf // WithStatus sets the Status field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Status field is set to the value of the last call. -func (b *FeatureGateApplyConfiguration) WithStatus(value apiconfigv1.FeatureGateStatus) *FeatureGateApplyConfiguration { - b.Status = &value +func (b *FeatureGateApplyConfiguration) WithStatus(value *FeatureGateStatusApplyConfiguration) *FeatureGateApplyConfiguration { + b.Status = value return b } diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateattributes.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateattributes.go new file mode 100644 index 000000000..817cf44f6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregateattributes.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/config/v1" +) + +// FeatureGateAttributesApplyConfiguration represents an declarative configuration of the FeatureGateAttributes type for use +// with apply. +type FeatureGateAttributesApplyConfiguration struct { + Name *v1.FeatureGateName `json:"name,omitempty"` +} + +// FeatureGateAttributesApplyConfiguration constructs an declarative configuration of the FeatureGateAttributes type for use with +// apply. +func FeatureGateAttributes() *FeatureGateAttributesApplyConfiguration { + return &FeatureGateAttributesApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *FeatureGateAttributesApplyConfiguration) WithName(value v1.FeatureGateName) *FeatureGateAttributesApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatedetails.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatedetails.go new file mode 100644 index 000000000..61bd51ca2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatedetails.go @@ -0,0 +1,51 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// FeatureGateDetailsApplyConfiguration represents an declarative configuration of the FeatureGateDetails type for use +// with apply. +type FeatureGateDetailsApplyConfiguration struct { + Version *string `json:"version,omitempty"` + Enabled []FeatureGateAttributesApplyConfiguration `json:"enabled,omitempty"` + Disabled []FeatureGateAttributesApplyConfiguration `json:"disabled,omitempty"` +} + +// FeatureGateDetailsApplyConfiguration constructs an declarative configuration of the FeatureGateDetails type for use with +// apply. +func FeatureGateDetails() *FeatureGateDetailsApplyConfiguration { + return &FeatureGateDetailsApplyConfiguration{} +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *FeatureGateDetailsApplyConfiguration) WithVersion(value string) *FeatureGateDetailsApplyConfiguration { + b.Version = &value + return b +} + +// WithEnabled adds the given value to the Enabled field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Enabled field. +func (b *FeatureGateDetailsApplyConfiguration) WithEnabled(values ...*FeatureGateAttributesApplyConfiguration) *FeatureGateDetailsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithEnabled") + } + b.Enabled = append(b.Enabled, *values[i]) + } + return b +} + +// WithDisabled adds the given value to the Disabled field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Disabled field. +func (b *FeatureGateDetailsApplyConfiguration) WithDisabled(values ...*FeatureGateAttributesApplyConfiguration) *FeatureGateDetailsApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithDisabled") + } + b.Disabled = append(b.Disabled, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatestatus.go new file mode 100644 index 000000000..9ffe735b7 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/featuregatestatus.go @@ -0,0 +1,43 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// FeatureGateStatusApplyConfiguration represents an declarative configuration of the FeatureGateStatus type for use +// with apply. +type FeatureGateStatusApplyConfiguration struct { + Conditions []v1.Condition `json:"conditions,omitempty"` + FeatureGates []FeatureGateDetailsApplyConfiguration `json:"featureGates,omitempty"` +} + +// FeatureGateStatusApplyConfiguration constructs an declarative configuration of the FeatureGateStatus type for use with +// apply. +func FeatureGateStatus() *FeatureGateStatusApplyConfiguration { + return &FeatureGateStatusApplyConfiguration{} +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *FeatureGateStatusApplyConfiguration) WithConditions(values ...v1.Condition) *FeatureGateStatusApplyConfiguration { + for i := range values { + b.Conditions = append(b.Conditions, values[i]) + } + return b +} + +// WithFeatureGates adds the given value to the FeatureGates field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the FeatureGates field. +func (b *FeatureGateStatusApplyConfiguration) WithFeatureGates(values ...*FeatureGateDetailsApplyConfiguration) *FeatureGateStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithFeatureGates") + } + b.FeatureGates = append(b.FeatureGates, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurestatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurestatus.go index 6c4130d8c..0f45b5562 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurestatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/infrastructurestatus.go @@ -17,6 +17,7 @@ type InfrastructureStatusApplyConfiguration struct { APIServerInternalURL *string `json:"apiServerInternalURI,omitempty"` ControlPlaneTopology *v1.TopologyMode `json:"controlPlaneTopology,omitempty"` InfrastructureTopology *v1.TopologyMode `json:"infrastructureTopology,omitempty"` + CPUPartitioning *v1.CPUPartitioningMode `json:"cpuPartitioning,omitempty"` } // InfrastructureStatusApplyConfiguration constructs an declarative configuration of the InfrastructureStatus type for use with @@ -88,3 +89,11 @@ func (b *InfrastructureStatusApplyConfiguration) WithInfrastructureTopology(valu b.InfrastructureTopology = &value return b } + +// WithCPUPartitioning sets the CPUPartitioning field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CPUPartitioning field is set to the value of the last call. 
+func (b *InfrastructureStatusApplyConfiguration) WithCPUPartitioning(value v1.CPUPartitioningMode) *InfrastructureStatusApplyConfiguration { + b.CPUPartitioning = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressspec.go index a5963751a..d934e664b 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ingressspec.go @@ -9,7 +9,7 @@ type IngressSpecApplyConfiguration struct { AppsDomain *string `json:"appsDomain,omitempty"` ComponentRoutes []ComponentRouteSpecApplyConfiguration `json:"componentRoutes,omitempty"` RequiredHSTSPolicies []RequiredHSTSPolicyApplyConfiguration `json:"requiredHSTSPolicies,omitempty"` - LoadBalancer *LoadBalancerApplyConfiguration `json:"loadbalancer,omitempty"` + LoadBalancer *LoadBalancerApplyConfiguration `json:"loadBalancer,omitempty"` } // IngressSpecApplyConfiguration constructs an declarative configuration of the IngressSpec type for use with diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformloadbalancer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformloadbalancer.go new file mode 100644 index 000000000..5ab68bb77 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformloadbalancer.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/config/v1" +) + +// NutanixPlatformLoadBalancerApplyConfiguration represents an declarative configuration of the NutanixPlatformLoadBalancer type for use +// with apply. +type NutanixPlatformLoadBalancerApplyConfiguration struct { + Type *v1.PlatformLoadBalancerType `json:"type,omitempty"` +} + +// NutanixPlatformLoadBalancerApplyConfiguration constructs an declarative configuration of the NutanixPlatformLoadBalancer type for use with +// apply. +func NutanixPlatformLoadBalancer() *NutanixPlatformLoadBalancerApplyConfiguration { + return &NutanixPlatformLoadBalancerApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *NutanixPlatformLoadBalancerApplyConfiguration) WithType(value v1.PlatformLoadBalancerType) *NutanixPlatformLoadBalancerApplyConfiguration { + b.Type = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformstatus.go index b3c20ca5c..8dd8a6895 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/nutanixplatformstatus.go @@ -5,10 +5,11 @@ package v1 // NutanixPlatformStatusApplyConfiguration represents an declarative configuration of the NutanixPlatformStatus type for use // with apply. 
type NutanixPlatformStatusApplyConfiguration struct { - APIServerInternalIP *string `json:"apiServerInternalIP,omitempty"` - APIServerInternalIPs []string `json:"apiServerInternalIPs,omitempty"` - IngressIP *string `json:"ingressIP,omitempty"` - IngressIPs []string `json:"ingressIPs,omitempty"` + APIServerInternalIP *string `json:"apiServerInternalIP,omitempty"` + APIServerInternalIPs []string `json:"apiServerInternalIPs,omitempty"` + IngressIP *string `json:"ingressIP,omitempty"` + IngressIPs []string `json:"ingressIPs,omitempty"` + LoadBalancer *NutanixPlatformLoadBalancerApplyConfiguration `json:"loadBalancer,omitempty"` } // NutanixPlatformStatusApplyConfiguration constructs an declarative configuration of the NutanixPlatformStatus type for use with @@ -52,3 +53,11 @@ func (b *NutanixPlatformStatusApplyConfiguration) WithIngressIPs(values ...strin } return b } + +// WithLoadBalancer sets the LoadBalancer field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LoadBalancer field is set to the value of the last call. +func (b *NutanixPlatformStatusApplyConfiguration) WithLoadBalancer(value *NutanixPlatformLoadBalancerApplyConfiguration) *NutanixPlatformStatusApplyConfiguration { + b.LoadBalancer = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformloadbalancer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformloadbalancer.go new file mode 100644 index 000000000..2eed83e1c --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformloadbalancer.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/config/v1" +) + +// OpenStackPlatformLoadBalancerApplyConfiguration represents an declarative configuration of the OpenStackPlatformLoadBalancer type for use +// with apply. +type OpenStackPlatformLoadBalancerApplyConfiguration struct { + Type *v1.PlatformLoadBalancerType `json:"type,omitempty"` +} + +// OpenStackPlatformLoadBalancerApplyConfiguration constructs an declarative configuration of the OpenStackPlatformLoadBalancer type for use with +// apply. +func OpenStackPlatformLoadBalancer() *OpenStackPlatformLoadBalancerApplyConfiguration { + return &OpenStackPlatformLoadBalancerApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. 
+func (b *OpenStackPlatformLoadBalancerApplyConfiguration) WithType(value v1.PlatformLoadBalancerType) *OpenStackPlatformLoadBalancerApplyConfiguration { + b.Type = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformstatus.go index 9f1cfc590..56e3f30a4 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/openstackplatformstatus.go @@ -5,12 +5,13 @@ package v1 // OpenStackPlatformStatusApplyConfiguration represents an declarative configuration of the OpenStackPlatformStatus type for use // with apply. type OpenStackPlatformStatusApplyConfiguration struct { - APIServerInternalIP *string `json:"apiServerInternalIP,omitempty"` - APIServerInternalIPs []string `json:"apiServerInternalIPs,omitempty"` - CloudName *string `json:"cloudName,omitempty"` - IngressIP *string `json:"ingressIP,omitempty"` - IngressIPs []string `json:"ingressIPs,omitempty"` - NodeDNSIP *string `json:"nodeDNSIP,omitempty"` + APIServerInternalIP *string `json:"apiServerInternalIP,omitempty"` + APIServerInternalIPs []string `json:"apiServerInternalIPs,omitempty"` + CloudName *string `json:"cloudName,omitempty"` + IngressIP *string `json:"ingressIP,omitempty"` + IngressIPs []string `json:"ingressIPs,omitempty"` + NodeDNSIP *string `json:"nodeDNSIP,omitempty"` + LoadBalancer *OpenStackPlatformLoadBalancerApplyConfiguration `json:"loadBalancer,omitempty"` } // OpenStackPlatformStatusApplyConfiguration constructs an declarative configuration of the OpenStackPlatformStatus type for use with @@ -70,3 +71,11 @@ func (b *OpenStackPlatformStatusApplyConfiguration) WithNodeDNSIP(value string) b.NodeDNSIP = &value return b } + +// WithLoadBalancer sets the LoadBalancer field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LoadBalancer field is set to the value of the last call. +func (b *OpenStackPlatformStatusApplyConfiguration) WithLoadBalancer(value *OpenStackPlatformLoadBalancerApplyConfiguration) *OpenStackPlatformStatusApplyConfiguration { + b.LoadBalancer = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformloadbalancer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformloadbalancer.go new file mode 100644 index 000000000..73c2a03a5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformloadbalancer.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/config/v1" +) + +// OvirtPlatformLoadBalancerApplyConfiguration represents an declarative configuration of the OvirtPlatformLoadBalancer type for use +// with apply. +type OvirtPlatformLoadBalancerApplyConfiguration struct { + Type *v1.PlatformLoadBalancerType `json:"type,omitempty"` +} + +// OvirtPlatformLoadBalancerApplyConfiguration constructs an declarative configuration of the OvirtPlatformLoadBalancer type for use with +// apply. 
+func OvirtPlatformLoadBalancer() *OvirtPlatformLoadBalancerApplyConfiguration { + return &OvirtPlatformLoadBalancerApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *OvirtPlatformLoadBalancerApplyConfiguration) WithType(value v1.PlatformLoadBalancerType) *OvirtPlatformLoadBalancerApplyConfiguration { + b.Type = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformstatus.go index 4aa175dff..21bb6c842 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/ovirtplatformstatus.go @@ -5,11 +5,12 @@ package v1 // OvirtPlatformStatusApplyConfiguration represents an declarative configuration of the OvirtPlatformStatus type for use // with apply. type OvirtPlatformStatusApplyConfiguration struct { - APIServerInternalIP *string `json:"apiServerInternalIP,omitempty"` - APIServerInternalIPs []string `json:"apiServerInternalIPs,omitempty"` - IngressIP *string `json:"ingressIP,omitempty"` - IngressIPs []string `json:"ingressIPs,omitempty"` - NodeDNSIP *string `json:"nodeDNSIP,omitempty"` + APIServerInternalIP *string `json:"apiServerInternalIP,omitempty"` + APIServerInternalIPs []string `json:"apiServerInternalIPs,omitempty"` + IngressIP *string `json:"ingressIP,omitempty"` + IngressIPs []string `json:"ingressIPs,omitempty"` + NodeDNSIP *string `json:"nodeDNSIP,omitempty"` + LoadBalancer *OvirtPlatformLoadBalancerApplyConfiguration `json:"loadBalancer,omitempty"` } // OvirtPlatformStatusApplyConfiguration constructs an declarative configuration of the OvirtPlatformStatus type for use with @@ -61,3 +62,11 @@ func (b *OvirtPlatformStatusApplyConfiguration) WithNodeDNSIP(value string) *Ovi b.NodeDNSIP = &value return b } + +// WithLoadBalancer sets the LoadBalancer field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LoadBalancer field is set to the value of the last call. +func (b *OvirtPlatformStatusApplyConfiguration) WithLoadBalancer(value *OvirtPlatformLoadBalancerApplyConfiguration) *OvirtPlatformStatusApplyConfiguration { + b.LoadBalancer = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformspec.go index f582fa338..080b2d4f2 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformspec.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformspec.go @@ -9,20 +9,21 @@ import ( // PlatformSpecApplyConfiguration represents an declarative configuration of the PlatformSpec type for use // with apply. 
type PlatformSpecApplyConfiguration struct { - Type *v1.PlatformType `json:"type,omitempty"` - AWS *AWSPlatformSpecApplyConfiguration `json:"aws,omitempty"` - Azure *v1.AzurePlatformSpec `json:"azure,omitempty"` - GCP *v1.GCPPlatformSpec `json:"gcp,omitempty"` - BareMetal *v1.BareMetalPlatformSpec `json:"baremetal,omitempty"` - OpenStack *v1.OpenStackPlatformSpec `json:"openstack,omitempty"` - Ovirt *v1.OvirtPlatformSpec `json:"ovirt,omitempty"` - VSphere *v1.VSpherePlatformSpec `json:"vsphere,omitempty"` - IBMCloud *v1.IBMCloudPlatformSpec `json:"ibmcloud,omitempty"` - Kubevirt *v1.KubevirtPlatformSpec `json:"kubevirt,omitempty"` - EquinixMetal *v1.EquinixMetalPlatformSpec `json:"equinixMetal,omitempty"` - PowerVS *PowerVSPlatformSpecApplyConfiguration `json:"powervs,omitempty"` - AlibabaCloud *v1.AlibabaCloudPlatformSpec `json:"alibabaCloud,omitempty"` - Nutanix *NutanixPlatformSpecApplyConfiguration `json:"nutanix,omitempty"` + Type *v1.PlatformType `json:"type,omitempty"` + AWS *AWSPlatformSpecApplyConfiguration `json:"aws,omitempty"` + Azure *v1.AzurePlatformSpec `json:"azure,omitempty"` + GCP *v1.GCPPlatformSpec `json:"gcp,omitempty"` + BareMetal *v1.BareMetalPlatformSpec `json:"baremetal,omitempty"` + OpenStack *v1.OpenStackPlatformSpec `json:"openstack,omitempty"` + Ovirt *v1.OvirtPlatformSpec `json:"ovirt,omitempty"` + VSphere *VSpherePlatformSpecApplyConfiguration `json:"vsphere,omitempty"` + IBMCloud *v1.IBMCloudPlatformSpec `json:"ibmcloud,omitempty"` + Kubevirt *v1.KubevirtPlatformSpec `json:"kubevirt,omitempty"` + EquinixMetal *v1.EquinixMetalPlatformSpec `json:"equinixMetal,omitempty"` + PowerVS *PowerVSPlatformSpecApplyConfiguration `json:"powervs,omitempty"` + AlibabaCloud *v1.AlibabaCloudPlatformSpec `json:"alibabaCloud,omitempty"` + Nutanix *NutanixPlatformSpecApplyConfiguration `json:"nutanix,omitempty"` + External *ExternalPlatformSpecApplyConfiguration `json:"external,omitempty"` } // PlatformSpecApplyConfiguration constructs an declarative configuration of the PlatformSpec type for use with @@ -90,8 +91,8 @@ func (b *PlatformSpecApplyConfiguration) WithOvirt(value v1.OvirtPlatformSpec) * // WithVSphere sets the VSphere field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the VSphere field is set to the value of the last call. -func (b *PlatformSpecApplyConfiguration) WithVSphere(value v1.VSpherePlatformSpec) *PlatformSpecApplyConfiguration { - b.VSphere = &value +func (b *PlatformSpecApplyConfiguration) WithVSphere(value *VSpherePlatformSpecApplyConfiguration) *PlatformSpecApplyConfiguration { + b.VSphere = value return b } @@ -142,3 +143,11 @@ func (b *PlatformSpecApplyConfiguration) WithNutanix(value *NutanixPlatformSpecA b.Nutanix = value return b } + +// WithExternal sets the External field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the External field is set to the value of the last call. 
+func (b *PlatformSpecApplyConfiguration) WithExternal(value *ExternalPlatformSpecApplyConfiguration) *PlatformSpecApplyConfiguration { + b.External = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformstatus.go index d027ef411..92c264062 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/platformstatus.go @@ -23,6 +23,7 @@ type PlatformStatusApplyConfiguration struct { PowerVS *PowerVSPlatformStatusApplyConfiguration `json:"powervs,omitempty"` AlibabaCloud *AlibabaCloudPlatformStatusApplyConfiguration `json:"alibabaCloud,omitempty"` Nutanix *NutanixPlatformStatusApplyConfiguration `json:"nutanix,omitempty"` + External *v1.ExternalPlatformStatus `json:"external,omitempty"` } // PlatformStatusApplyConfiguration constructs an declarative configuration of the PlatformStatus type for use with @@ -142,3 +143,11 @@ func (b *PlatformStatusApplyConfiguration) WithNutanix(value *NutanixPlatformSta b.Nutanix = value return b } + +// WithExternal sets the External field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the External field is set to the value of the last call. +func (b *PlatformStatusApplyConfiguration) WithExternal(value v1.ExternalPlatformStatus) *PlatformStatusApplyConfiguration { + b.External = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsplatformstatus.go index d8b28629b..c1660d005 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/powervsplatformstatus.go @@ -7,6 +7,7 @@ package v1 type PowerVSPlatformStatusApplyConfiguration struct { Region *string `json:"region,omitempty"` Zone *string `json:"zone,omitempty"` + ResourceGroup *string `json:"resourceGroup,omitempty"` ServiceEndpoints []PowerVSServiceEndpointApplyConfiguration `json:"serviceEndpoints,omitempty"` CISInstanceCRN *string `json:"cisInstanceCRN,omitempty"` DNSInstanceCRN *string `json:"dnsInstanceCRN,omitempty"` @@ -34,6 +35,14 @@ func (b *PowerVSPlatformStatusApplyConfiguration) WithZone(value string) *PowerV return b } +// WithResourceGroup sets the ResourceGroup field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceGroup field is set to the value of the last call. +func (b *PowerVSPlatformStatusApplyConfiguration) WithResourceGroup(value string) *PowerVSPlatformStatusApplyConfiguration { + b.ResourceGroup = &value + return b +} + // WithServiceEndpoints adds the given value to the ServiceEndpoints field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the ServiceEndpoints field. 
diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/update.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/update.go index dd7022919..1f63851c2 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/update.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/update.go @@ -2,12 +2,17 @@ package v1 +import ( + v1 "github.com/openshift/api/config/v1" +) + // UpdateApplyConfiguration represents an declarative configuration of the Update type for use // with apply. type UpdateApplyConfiguration struct { - Version *string `json:"version,omitempty"` - Image *string `json:"image,omitempty"` - Force *bool `json:"force,omitempty"` + Architecture *v1.ClusterVersionArchitecture `json:"architecture,omitempty"` + Version *string `json:"version,omitempty"` + Image *string `json:"image,omitempty"` + Force *bool `json:"force,omitempty"` } // UpdateApplyConfiguration constructs an declarative configuration of the Update type for use with @@ -16,6 +21,14 @@ func Update() *UpdateApplyConfiguration { return &UpdateApplyConfiguration{} } +// WithArchitecture sets the Architecture field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Architecture field is set to the value of the last call. +func (b *UpdateApplyConfiguration) WithArchitecture(value v1.ClusterVersionArchitecture) *UpdateApplyConfiguration { + b.Architecture = &value + return b +} + // WithVersion sets the Version field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Version field is set to the value of the last call. diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformfailuredomainspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformfailuredomainspec.go new file mode 100644 index 000000000..0bad0fadf --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformfailuredomainspec.go @@ -0,0 +1,59 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// VSpherePlatformFailureDomainSpecApplyConfiguration represents an declarative configuration of the VSpherePlatformFailureDomainSpec type for use +// with apply. +type VSpherePlatformFailureDomainSpecApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Region *string `json:"region,omitempty"` + Zone *string `json:"zone,omitempty"` + Server *string `json:"server,omitempty"` + Topology *VSpherePlatformTopologyApplyConfiguration `json:"topology,omitempty"` +} + +// VSpherePlatformFailureDomainSpecApplyConfiguration constructs an declarative configuration of the VSpherePlatformFailureDomainSpec type for use with +// apply. +func VSpherePlatformFailureDomainSpec() *VSpherePlatformFailureDomainSpecApplyConfiguration { + return &VSpherePlatformFailureDomainSpecApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *VSpherePlatformFailureDomainSpecApplyConfiguration) WithName(value string) *VSpherePlatformFailureDomainSpecApplyConfiguration { + b.Name = &value + return b +} + +// WithRegion sets the Region field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Region field is set to the value of the last call. +func (b *VSpherePlatformFailureDomainSpecApplyConfiguration) WithRegion(value string) *VSpherePlatformFailureDomainSpecApplyConfiguration { + b.Region = &value + return b +} + +// WithZone sets the Zone field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Zone field is set to the value of the last call. +func (b *VSpherePlatformFailureDomainSpecApplyConfiguration) WithZone(value string) *VSpherePlatformFailureDomainSpecApplyConfiguration { + b.Zone = &value + return b +} + +// WithServer sets the Server field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Server field is set to the value of the last call. +func (b *VSpherePlatformFailureDomainSpecApplyConfiguration) WithServer(value string) *VSpherePlatformFailureDomainSpecApplyConfiguration { + b.Server = &value + return b +} + +// WithTopology sets the Topology field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Topology field is set to the value of the last call. +func (b *VSpherePlatformFailureDomainSpecApplyConfiguration) WithTopology(value *VSpherePlatformTopologyApplyConfiguration) *VSpherePlatformFailureDomainSpecApplyConfiguration { + b.Topology = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformloadbalancer.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformloadbalancer.go new file mode 100644 index 000000000..873f0289e --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformloadbalancer.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/config/v1" +) + +// VSpherePlatformLoadBalancerApplyConfiguration represents an declarative configuration of the VSpherePlatformLoadBalancer type for use +// with apply. +type VSpherePlatformLoadBalancerApplyConfiguration struct { + Type *v1.PlatformLoadBalancerType `json:"type,omitempty"` +} + +// VSpherePlatformLoadBalancerApplyConfiguration constructs an declarative configuration of the VSpherePlatformLoadBalancer type for use with +// apply. +func VSpherePlatformLoadBalancer() *VSpherePlatformLoadBalancerApplyConfiguration { + return &VSpherePlatformLoadBalancerApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. 
+func (b *VSpherePlatformLoadBalancerApplyConfiguration) WithType(value v1.PlatformLoadBalancerType) *VSpherePlatformLoadBalancerApplyConfiguration { + b.Type = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworking.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworking.go new file mode 100644 index 000000000..042737f1d --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworking.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// VSpherePlatformNodeNetworkingApplyConfiguration represents an declarative configuration of the VSpherePlatformNodeNetworking type for use +// with apply. +type VSpherePlatformNodeNetworkingApplyConfiguration struct { + External *VSpherePlatformNodeNetworkingSpecApplyConfiguration `json:"external,omitempty"` + Internal *VSpherePlatformNodeNetworkingSpecApplyConfiguration `json:"internal,omitempty"` +} + +// VSpherePlatformNodeNetworkingApplyConfiguration constructs an declarative configuration of the VSpherePlatformNodeNetworking type for use with +// apply. +func VSpherePlatformNodeNetworking() *VSpherePlatformNodeNetworkingApplyConfiguration { + return &VSpherePlatformNodeNetworkingApplyConfiguration{} +} + +// WithExternal sets the External field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the External field is set to the value of the last call. +func (b *VSpherePlatformNodeNetworkingApplyConfiguration) WithExternal(value *VSpherePlatformNodeNetworkingSpecApplyConfiguration) *VSpherePlatformNodeNetworkingApplyConfiguration { + b.External = value + return b +} + +// WithInternal sets the Internal field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Internal field is set to the value of the last call. +func (b *VSpherePlatformNodeNetworkingApplyConfiguration) WithInternal(value *VSpherePlatformNodeNetworkingSpecApplyConfiguration) *VSpherePlatformNodeNetworkingApplyConfiguration { + b.Internal = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworkingspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworkingspec.go new file mode 100644 index 000000000..e13c42d64 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformnodenetworkingspec.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// VSpherePlatformNodeNetworkingSpecApplyConfiguration represents an declarative configuration of the VSpherePlatformNodeNetworkingSpec type for use +// with apply. +type VSpherePlatformNodeNetworkingSpecApplyConfiguration struct { + NetworkSubnetCIDR []string `json:"networkSubnetCidr,omitempty"` + Network *string `json:"network,omitempty"` + ExcludeNetworkSubnetCIDR []string `json:"excludeNetworkSubnetCidr,omitempty"` +} + +// VSpherePlatformNodeNetworkingSpecApplyConfiguration constructs an declarative configuration of the VSpherePlatformNodeNetworkingSpec type for use with +// apply. 
+func VSpherePlatformNodeNetworkingSpec() *VSpherePlatformNodeNetworkingSpecApplyConfiguration { + return &VSpherePlatformNodeNetworkingSpecApplyConfiguration{} +} + +// WithNetworkSubnetCIDR adds the given value to the NetworkSubnetCIDR field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the NetworkSubnetCIDR field. +func (b *VSpherePlatformNodeNetworkingSpecApplyConfiguration) WithNetworkSubnetCIDR(values ...string) *VSpherePlatformNodeNetworkingSpecApplyConfiguration { + for i := range values { + b.NetworkSubnetCIDR = append(b.NetworkSubnetCIDR, values[i]) + } + return b +} + +// WithNetwork sets the Network field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Network field is set to the value of the last call. +func (b *VSpherePlatformNodeNetworkingSpecApplyConfiguration) WithNetwork(value string) *VSpherePlatformNodeNetworkingSpecApplyConfiguration { + b.Network = &value + return b +} + +// WithExcludeNetworkSubnetCIDR adds the given value to the ExcludeNetworkSubnetCIDR field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ExcludeNetworkSubnetCIDR field. +func (b *VSpherePlatformNodeNetworkingSpecApplyConfiguration) WithExcludeNetworkSubnetCIDR(values ...string) *VSpherePlatformNodeNetworkingSpecApplyConfiguration { + for i := range values { + b.ExcludeNetworkSubnetCIDR = append(b.ExcludeNetworkSubnetCIDR, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformspec.go new file mode 100644 index 000000000..3223b2ddf --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformspec.go @@ -0,0 +1,51 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// VSpherePlatformSpecApplyConfiguration represents an declarative configuration of the VSpherePlatformSpec type for use +// with apply. +type VSpherePlatformSpecApplyConfiguration struct { + VCenters []VSpherePlatformVCenterSpecApplyConfiguration `json:"vcenters,omitempty"` + FailureDomains []VSpherePlatformFailureDomainSpecApplyConfiguration `json:"failureDomains,omitempty"` + NodeNetworking *VSpherePlatformNodeNetworkingApplyConfiguration `json:"nodeNetworking,omitempty"` +} + +// VSpherePlatformSpecApplyConfiguration constructs an declarative configuration of the VSpherePlatformSpec type for use with +// apply. +func VSpherePlatformSpec() *VSpherePlatformSpecApplyConfiguration { + return &VSpherePlatformSpecApplyConfiguration{} +} + +// WithVCenters adds the given value to the VCenters field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the VCenters field. 
+func (b *VSpherePlatformSpecApplyConfiguration) WithVCenters(values ...*VSpherePlatformVCenterSpecApplyConfiguration) *VSpherePlatformSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithVCenters") + } + b.VCenters = append(b.VCenters, *values[i]) + } + return b +} + +// WithFailureDomains adds the given value to the FailureDomains field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the FailureDomains field. +func (b *VSpherePlatformSpecApplyConfiguration) WithFailureDomains(values ...*VSpherePlatformFailureDomainSpecApplyConfiguration) *VSpherePlatformSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithFailureDomains") + } + b.FailureDomains = append(b.FailureDomains, *values[i]) + } + return b +} + +// WithNodeNetworking sets the NodeNetworking field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the NodeNetworking field is set to the value of the last call. +func (b *VSpherePlatformSpecApplyConfiguration) WithNodeNetworking(value *VSpherePlatformNodeNetworkingApplyConfiguration) *VSpherePlatformSpecApplyConfiguration { + b.NodeNetworking = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformstatus.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformstatus.go index 9537bc197..01e6fe9fe 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformstatus.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformstatus.go @@ -5,11 +5,12 @@ package v1 // VSpherePlatformStatusApplyConfiguration represents an declarative configuration of the VSpherePlatformStatus type for use // with apply. type VSpherePlatformStatusApplyConfiguration struct { - APIServerInternalIP *string `json:"apiServerInternalIP,omitempty"` - APIServerInternalIPs []string `json:"apiServerInternalIPs,omitempty"` - IngressIP *string `json:"ingressIP,omitempty"` - IngressIPs []string `json:"ingressIPs,omitempty"` - NodeDNSIP *string `json:"nodeDNSIP,omitempty"` + APIServerInternalIP *string `json:"apiServerInternalIP,omitempty"` + APIServerInternalIPs []string `json:"apiServerInternalIPs,omitempty"` + IngressIP *string `json:"ingressIP,omitempty"` + IngressIPs []string `json:"ingressIPs,omitempty"` + NodeDNSIP *string `json:"nodeDNSIP,omitempty"` + LoadBalancer *VSpherePlatformLoadBalancerApplyConfiguration `json:"loadBalancer,omitempty"` } // VSpherePlatformStatusApplyConfiguration constructs an declarative configuration of the VSpherePlatformStatus type for use with @@ -61,3 +62,11 @@ func (b *VSpherePlatformStatusApplyConfiguration) WithNodeDNSIP(value string) *V b.NodeDNSIP = &value return b } + +// WithLoadBalancer sets the LoadBalancer field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LoadBalancer field is set to the value of the last call. 
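The variadic setters above take pointers, reject nil with a panic, and append dereferenced copies; that is the convention applyconfiguration-gen uses for struct-typed list fields. A sketch under those assumptions (the server name and datacenter are illustrative):

package main

import (
	"fmt"

	configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

func main() {
	spec := configv1.VSpherePlatformSpec().
		WithVCenters(configv1.VSpherePlatformVCenterSpec(). // builder added later in this patch
			WithServer("vcenter.example.com").
			WithPort(443).
			WithDatacenters("dc-east"))
	fmt.Println(len(spec.VCenters)) // 1
}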
+func (b *VSpherePlatformStatusApplyConfiguration) WithLoadBalancer(value *VSpherePlatformLoadBalancerApplyConfiguration) *VSpherePlatformStatusApplyConfiguration { + b.LoadBalancer = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformtopology.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformtopology.go new file mode 100644 index 000000000..1e1d65169 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformtopology.go @@ -0,0 +1,70 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// VSpherePlatformTopologyApplyConfiguration represents an declarative configuration of the VSpherePlatformTopology type for use +// with apply. +type VSpherePlatformTopologyApplyConfiguration struct { + Datacenter *string `json:"datacenter,omitempty"` + ComputeCluster *string `json:"computeCluster,omitempty"` + Networks []string `json:"networks,omitempty"` + Datastore *string `json:"datastore,omitempty"` + ResourcePool *string `json:"resourcePool,omitempty"` + Folder *string `json:"folder,omitempty"` +} + +// VSpherePlatformTopologyApplyConfiguration constructs an declarative configuration of the VSpherePlatformTopology type for use with +// apply. +func VSpherePlatformTopology() *VSpherePlatformTopologyApplyConfiguration { + return &VSpherePlatformTopologyApplyConfiguration{} +} + +// WithDatacenter sets the Datacenter field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Datacenter field is set to the value of the last call. +func (b *VSpherePlatformTopologyApplyConfiguration) WithDatacenter(value string) *VSpherePlatformTopologyApplyConfiguration { + b.Datacenter = &value + return b +} + +// WithComputeCluster sets the ComputeCluster field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ComputeCluster field is set to the value of the last call. +func (b *VSpherePlatformTopologyApplyConfiguration) WithComputeCluster(value string) *VSpherePlatformTopologyApplyConfiguration { + b.ComputeCluster = &value + return b +} + +// WithNetworks adds the given value to the Networks field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Networks field. +func (b *VSpherePlatformTopologyApplyConfiguration) WithNetworks(values ...string) *VSpherePlatformTopologyApplyConfiguration { + for i := range values { + b.Networks = append(b.Networks, values[i]) + } + return b +} + +// WithDatastore sets the Datastore field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Datastore field is set to the value of the last call. 
+func (b *VSpherePlatformTopologyApplyConfiguration) WithDatastore(value string) *VSpherePlatformTopologyApplyConfiguration { + b.Datastore = &value + return b +} + +// WithResourcePool sets the ResourcePool field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourcePool field is set to the value of the last call. +func (b *VSpherePlatformTopologyApplyConfiguration) WithResourcePool(value string) *VSpherePlatformTopologyApplyConfiguration { + b.ResourcePool = &value + return b +} + +// WithFolder sets the Folder field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Folder field is set to the value of the last call. +func (b *VSpherePlatformTopologyApplyConfiguration) WithFolder(value string) *VSpherePlatformTopologyApplyConfiguration { + b.Folder = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformvcenterspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformvcenterspec.go new file mode 100644 index 000000000..59b2261c2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1/vsphereplatformvcenterspec.go @@ -0,0 +1,43 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// VSpherePlatformVCenterSpecApplyConfiguration represents an declarative configuration of the VSpherePlatformVCenterSpec type for use +// with apply. +type VSpherePlatformVCenterSpecApplyConfiguration struct { + Server *string `json:"server,omitempty"` + Port *int32 `json:"port,omitempty"` + Datacenters []string `json:"datacenters,omitempty"` +} + +// VSpherePlatformVCenterSpecApplyConfiguration constructs an declarative configuration of the VSpherePlatformVCenterSpec type for use with +// apply. +func VSpherePlatformVCenterSpec() *VSpherePlatformVCenterSpecApplyConfiguration { + return &VSpherePlatformVCenterSpecApplyConfiguration{} +} + +// WithServer sets the Server field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Server field is set to the value of the last call. +func (b *VSpherePlatformVCenterSpecApplyConfiguration) WithServer(value string) *VSpherePlatformVCenterSpecApplyConfiguration { + b.Server = &value + return b +} + +// WithPort sets the Port field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Port field is set to the value of the last call. +func (b *VSpherePlatformVCenterSpecApplyConfiguration) WithPort(value int32) *VSpherePlatformVCenterSpecApplyConfiguration { + b.Port = &value + return b +} + +// WithDatacenters adds the given value to the Datacenters field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Datacenters field. 
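A short sketch of composing the topology builder above; the vSphere inventory paths are illustrative and would normally feed the VSpherePlatformFailureDomainSpec builder generated elsewhere in this patch:

package main

import (
	"fmt"

	configv1 "github.com/openshift/client-go/config/applyconfigurations/config/v1"
)

func main() {
	topology := configv1.VSpherePlatformTopology().
		WithDatacenter("dc-east").
		WithComputeCluster("/dc-east/host/cluster-1").
		WithNetworks("VM Network").
		WithDatastore("/dc-east/datastore/ds-1")
	fmt.Println(*topology.ComputeCluster)
}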
+func (b *VSpherePlatformVCenterSpecApplyConfiguration) WithDatacenters(values ...string) *VSpherePlatformVCenterSpecApplyConfiguration { + for i := range values { + b.Datacenters = append(b.Datacenters, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/gatherconfig.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/gatherconfig.go new file mode 100644 index 000000000..2eec8ffd2 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/gatherconfig.go @@ -0,0 +1,38 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/openshift/api/config/v1alpha1" +) + +// GatherConfigApplyConfiguration represents an declarative configuration of the GatherConfig type for use +// with apply. +type GatherConfigApplyConfiguration struct { + DataPolicy *v1alpha1.DataPolicy `json:"dataPolicy,omitempty"` + DisabledGatherers []string `json:"disabledGatherers,omitempty"` +} + +// GatherConfigApplyConfiguration constructs an declarative configuration of the GatherConfig type for use with +// apply. +func GatherConfig() *GatherConfigApplyConfiguration { + return &GatherConfigApplyConfiguration{} +} + +// WithDataPolicy sets the DataPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DataPolicy field is set to the value of the last call. +func (b *GatherConfigApplyConfiguration) WithDataPolicy(value v1alpha1.DataPolicy) *GatherConfigApplyConfiguration { + b.DataPolicy = &value + return b +} + +// WithDisabledGatherers adds the given value to the DisabledGatherers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the DisabledGatherers field. +func (b *GatherConfigApplyConfiguration) WithDisabledGatherers(values ...string) *GatherConfigApplyConfiguration { + for i := range values { + b.DisabledGatherers = append(b.DisabledGatherers, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/insightsdatagather.go new file mode 100644 index 000000000..b86f19208 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/insightsdatagather.go @@ -0,0 +1,240 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + internal "github.com/openshift/client-go/config/applyconfigurations/internal" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// InsightsDataGatherApplyConfiguration represents an declarative configuration of the InsightsDataGather type for use +// with apply. 
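For the new v1alpha1 GatherConfig builder just added, a minimal sketch; DataPolicy is a string-backed type from openshift/api, and both the policy value and the gatherer name below are illustrative:

package main

import (
	"fmt"

	v1alpha1 "github.com/openshift/api/config/v1alpha1"
	applyv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1"
)

func main() {
	gc := applyv1alpha1.GatherConfig().
		WithDataPolicy(v1alpha1.DataPolicy("ObfuscateNetworking")). // illustrative policy value
		WithDisabledGatherers("clusterconfig/container_images")     // hypothetical gatherer name
	fmt.Println(*gc.DataPolicy)
}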
+type InsightsDataGatherApplyConfiguration struct {
+	v1.TypeMetaApplyConfiguration    `json:",inline"`
+	*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
+	Spec                             *InsightsDataGatherSpecApplyConfiguration `json:"spec,omitempty"`
+	Status                           *configv1alpha1.InsightsDataGatherStatus  `json:"status,omitempty"`
+}
+
+// InsightsDataGather constructs a declarative configuration of the InsightsDataGather type for use with
+// apply.
+func InsightsDataGather(name string) *InsightsDataGatherApplyConfiguration {
+	b := &InsightsDataGatherApplyConfiguration{}
+	b.WithName(name)
+	b.WithKind("InsightsDataGather")
+	b.WithAPIVersion("config.openshift.io/v1alpha1")
+	return b
+}
+
+// ExtractInsightsDataGather extracts the applied configuration owned by fieldManager from
+// insightsDataGather. If no managedFields are found in insightsDataGather for fieldManager, an
+// InsightsDataGatherApplyConfiguration is returned with only the Name, Namespace (if applicable),
+// APIVersion and Kind populated. It is possible that no managed fields were found because other
+// field managers have taken ownership of all the fields previously owned by fieldManager, or because
+// the fieldManager never owned any fields.
+// insightsDataGather must be an unmodified InsightsDataGather API object that was retrieved from the Kubernetes API.
+// ExtractInsightsDataGather provides a way to perform an extract/modify-in-place/apply workflow.
+// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
+// applied if another fieldManager has updated or force applied any of the previously applied fields.
+// Experimental!
+func ExtractInsightsDataGather(insightsDataGather *configv1alpha1.InsightsDataGather, fieldManager string) (*InsightsDataGatherApplyConfiguration, error) {
+	return extractInsightsDataGather(insightsDataGather, fieldManager, "")
+}
+
+// ExtractInsightsDataGatherStatus is the same as ExtractInsightsDataGather except
+// that it extracts the status subresource applied configuration.
+// Experimental!
+func ExtractInsightsDataGatherStatus(insightsDataGather *configv1alpha1.InsightsDataGather, fieldManager string) (*InsightsDataGatherApplyConfiguration, error) {
+	return extractInsightsDataGather(insightsDataGather, fieldManager, "status")
+}
+
+func extractInsightsDataGather(insightsDataGather *configv1alpha1.InsightsDataGather, fieldManager string, subresource string) (*InsightsDataGatherApplyConfiguration, error) {
+	b := &InsightsDataGatherApplyConfiguration{}
+	err := managedfields.ExtractInto(insightsDataGather, internal.Parser().Type("com.github.openshift.api.config.v1alpha1.InsightsDataGather"), fieldManager, b, subresource)
+	if err != nil {
+		return nil, err
+	}
+	b.WithName(insightsDataGather.Name)
+
+	b.WithKind("InsightsDataGather")
+	b.WithAPIVersion("config.openshift.io/v1alpha1")
+	return b, nil
+}
+
+// WithKind sets the Kind field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the Kind field is set to the value of the last call.
+func (b *InsightsDataGatherApplyConfiguration) WithKind(value string) *InsightsDataGatherApplyConfiguration {
+	b.Kind = &value
+	return b
+}
+
+// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithAPIVersion(value string) *InsightsDataGatherApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithName(value string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithGenerateName(value string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithNamespace(value string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithUID(value types.UID) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithResourceVersion(value string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithGeneration(value int64) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. 
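The extract functions above enable the extract/modify-in-place/apply workflow. A sketch under stated assumptions: the typed InsightsDataGathers client is added later in this patch, and the object name "cluster" and field manager "insights-operator" are illustrative:

package example

import (
	"context"

	applyv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1"
	configclient "github.com/openshift/client-go/config/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func reapply(ctx context.Context, client configclient.Interface) error {
	current, err := client.ConfigV1alpha1().InsightsDataGathers().Get(ctx, "cluster", metav1.GetOptions{})
	if err != nil {
		return err
	}
	// Recover only the fields this manager owns, tweak them, and apply again.
	ac, err := applyv1alpha1.ExtractInsightsDataGather(current, "insights-operator")
	if err != nil {
		return err
	}
	ac.WithAnnotations(map[string]string{"example.com/checked": "true"}) // hypothetical annotation
	_, err = client.ConfigV1alpha1().InsightsDataGathers().
		Apply(ctx, ac, metav1.ApplyOptions{FieldManager: "insights-operator", Force: true})
	return err
}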
+func (b *InsightsDataGatherApplyConfiguration) WithCreationTimestamp(value metav1.Time) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *InsightsDataGatherApplyConfiguration) WithLabels(entries map[string]string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *InsightsDataGatherApplyConfiguration) WithAnnotations(entries map[string]string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
+func (b *InsightsDataGatherApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *InsightsDataGatherApplyConfiguration) WithFinalizers(values ...string) *InsightsDataGatherApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *InsightsDataGatherApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithSpec(value *InsightsDataGatherSpecApplyConfiguration) *InsightsDataGatherApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *InsightsDataGatherApplyConfiguration) WithStatus(value configv1alpha1.InsightsDataGatherStatus) *InsightsDataGatherApplyConfiguration { + b.Status = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/insightsdatagatherspec.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/insightsdatagatherspec.go new file mode 100644 index 000000000..44416cf85 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1/insightsdatagatherspec.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +// InsightsDataGatherSpecApplyConfiguration represents an declarative configuration of the InsightsDataGatherSpec type for use +// with apply. +type InsightsDataGatherSpecApplyConfiguration struct { + GatherConfig *GatherConfigApplyConfiguration `json:"gatherConfig,omitempty"` +} + +// InsightsDataGatherSpecApplyConfiguration constructs an declarative configuration of the InsightsDataGatherSpec type for use with +// apply. +func InsightsDataGatherSpec() *InsightsDataGatherSpecApplyConfiguration { + return &InsightsDataGatherSpecApplyConfiguration{} +} + +// WithGatherConfig sets the GatherConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GatherConfig field is set to the value of the last call. 
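Putting the metadata setters and WithSpec together, a sketch of a complete apply configuration; the label and the singleton name "cluster" are illustrative:

package main

import (
	"fmt"

	applyv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1"
)

func main() {
	idg := applyv1alpha1.InsightsDataGather("cluster").
		WithLabels(map[string]string{"app": "insights"}). // illustrative label
		WithSpec(applyv1alpha1.InsightsDataGatherSpec().
			WithGatherConfig(applyv1alpha1.GatherConfig()))
	fmt.Println(*idg.Name)
}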
+func (b *InsightsDataGatherSpecApplyConfiguration) WithGatherConfig(value *GatherConfigApplyConfiguration) *InsightsDataGatherSpecApplyConfiguration { + b.GatherConfig = value + return b +} diff --git a/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go index a36289368..8ec86a2ab 100644 --- a/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go +++ b/vendor/github.com/openshift/client-go/config/applyconfigurations/internal/internal.go @@ -112,6 +112,13 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: __untyped_deduced_ elementRelationship: separable +- name: com.github.openshift.api.config.v1.AWSDNSSpec + map: + fields: + - name: privateZoneIAMRole + type: + scalar: string + default: "" - name: com.github.openshift.api.config.v1.AWSIngressSpec map: fields: @@ -315,6 +322,32 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" + - name: resourceTags + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.AzureResourceTag + elementRelationship: atomic +- name: com.github.openshift.api.config.v1.AzureResourceTag + map: + fields: + - name: key + type: + scalar: string + default: "" + - name: value + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.BareMetalPlatformLoadBalancer + map: + fields: + - name: type + type: + scalar: string + default: OpenShiftManagedDefault + unions: + - discriminator: type - name: com.github.openshift.api.config.v1.BareMetalPlatformSpec map: elementType: @@ -348,6 +381,11 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: atomic + - name: loadBalancer + type: + namedType: com.github.openshift.api.config.v1.BareMetalPlatformLoadBalancer + default: + type: OpenShiftManagedDefault - name: nodeDNSIP type: scalar: string @@ -901,6 +939,21 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: com.github.openshift.api.config.v1.DNSStatus default: {} +- name: com.github.openshift.api.config.v1.DNSPlatformSpec + map: + fields: + - name: aws + type: + namedType: com.github.openshift.api.config.v1.AWSDNSSpec + - name: type + type: + scalar: string + default: "" + unions: + - discriminator: type + fields: + - fieldName: aws + discriminatorValue: AWS - name: com.github.openshift.api.config.v1.DNSSpec map: fields: @@ -908,6 +961,10 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" + - name: platform + type: + namedType: com.github.openshift.api.config.v1.DNSPlatformSpec + default: {} - name: privateZone type: namedType: com.github.openshift.api.config.v1.DNSZone @@ -992,6 +1049,25 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: atomic +- name: com.github.openshift.api.config.v1.ExternalPlatformSpec + map: + fields: + - name: platformName + type: + scalar: string + default: Unknown +- name: com.github.openshift.api.config.v1.ExternalPlatformStatus + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable - name: com.github.openshift.api.config.v1.FeatureGate map: fields: @@ -1013,6 +1089,32 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: com.github.openshift.api.config.v1.FeatureGateStatus default: {} +- name: 
com.github.openshift.api.config.v1.FeatureGateAttributes + map: + fields: + - name: name + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.FeatureGateDetails + map: + fields: + - name: disabled + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.FeatureGateAttributes + elementRelationship: atomic + - name: enabled + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.FeatureGateAttributes + elementRelationship: atomic + - name: version + type: + scalar: string + default: "" - name: com.github.openshift.api.config.v1.FeatureGateSpec map: fields: @@ -1029,16 +1131,23 @@ var schemaYAML = typed.YAMLObject(`types: discriminatorValue: CustomNoUpgrade - name: com.github.openshift.api.config.v1.FeatureGateStatus map: - elementType: - scalar: untyped - list: - elementType: - namedType: __untyped_atomic_ - elementRelationship: atomic - map: - elementType: - namedType: __untyped_deduced_ - elementRelationship: separable + fields: + - name: conditions + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition + elementRelationship: associative + keys: + - type + - name: featureGates + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.FeatureGateDetails + elementRelationship: associative + keys: + - version - name: com.github.openshift.api.config.v1.GCPPlatformSpec map: elementType: @@ -1482,6 +1591,10 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" + - name: cpuPartitioning + type: + scalar: string + default: None - name: etcdDiscoveryDomain type: scalar: string @@ -1555,7 +1668,7 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" - - name: loadbalancer + - name: loadBalancer type: namedType: com.github.openshift.api.config.v1.LoadBalancer default: {} @@ -1858,6 +1971,15 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: __untyped_deduced_ elementRelationship: separable +- name: com.github.openshift.api.config.v1.NutanixPlatformLoadBalancer + map: + fields: + - name: type + type: + scalar: string + default: OpenShiftManagedDefault + unions: + - discriminator: type - name: com.github.openshift.api.config.v1.NutanixPlatformSpec map: fields: @@ -1894,6 +2016,11 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: atomic + - name: loadBalancer + type: + namedType: com.github.openshift.api.config.v1.NutanixPlatformLoadBalancer + default: + type: OpenShiftManagedDefault - name: com.github.openshift.api.config.v1.NutanixPrismElementEndpoint map: fields: @@ -2072,6 +2199,15 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: com.github.openshift.api.config.v1.OpenStackPlatformLoadBalancer + map: + fields: + - name: type + type: + scalar: string + default: OpenShiftManagedDefault + unions: + - discriminator: type - name: com.github.openshift.api.config.v1.OpenStackPlatformSpec map: elementType: @@ -2108,6 +2244,11 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: atomic + - name: loadBalancer + type: + namedType: com.github.openshift.api.config.v1.OpenStackPlatformLoadBalancer + default: + type: OpenShiftManagedDefault - name: nodeDNSIP type: scalar: string @@ -2164,6 +2305,15 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: com.github.openshift.api.config.v1.HubSourceStatus elementRelationship: atomic +- name: 
com.github.openshift.api.config.v1.OvirtPlatformLoadBalancer + map: + fields: + - name: type + type: + scalar: string + default: OpenShiftManagedDefault + unions: + - discriminator: type - name: com.github.openshift.api.config.v1.OvirtPlatformSpec map: elementType: @@ -2197,6 +2347,11 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: atomic + - name: loadBalancer + type: + namedType: com.github.openshift.api.config.v1.OvirtPlatformLoadBalancer + default: + type: OpenShiftManagedDefault - name: nodeDNSIP type: scalar: string @@ -2218,6 +2373,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: equinixMetal type: namedType: com.github.openshift.api.config.v1.EquinixMetalPlatformSpec + - name: external + type: + namedType: com.github.openshift.api.config.v1.ExternalPlatformSpec - name: gcp type: namedType: com.github.openshift.api.config.v1.GCPPlatformSpec @@ -2264,6 +2422,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: equinixMetal type: namedType: com.github.openshift.api.config.v1.EquinixMetalPlatformStatus + - name: external + type: + namedType: com.github.openshift.api.config.v1.ExternalPlatformStatus - name: gcp type: namedType: com.github.openshift.api.config.v1.GCPPlatformStatus @@ -2316,6 +2477,10 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" + - name: resourceGroup + type: + scalar: string + default: "" - name: serviceEndpoints type: list: @@ -2692,6 +2857,10 @@ var schemaYAML = typed.YAMLObject(`types: - name: com.github.openshift.api.config.v1.Update map: fields: + - name: architecture + type: + scalar: string + default: "" - name: force type: scalar: boolean @@ -2733,18 +2902,86 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: com.github.openshift.api.config.v1.VSpherePlatformFailureDomainSpec + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: region + type: + scalar: string + default: "" + - name: server + type: + scalar: string + default: "" + - name: topology + type: + namedType: com.github.openshift.api.config.v1.VSpherePlatformTopology + default: {} + - name: zone + type: + scalar: string + default: "" +- name: com.github.openshift.api.config.v1.VSpherePlatformLoadBalancer + map: + fields: + - name: type + type: + scalar: string + default: OpenShiftManagedDefault + unions: + - discriminator: type +- name: com.github.openshift.api.config.v1.VSpherePlatformNodeNetworking + map: + fields: + - name: external + type: + namedType: com.github.openshift.api.config.v1.VSpherePlatformNodeNetworkingSpec + default: {} + - name: internal + type: + namedType: com.github.openshift.api.config.v1.VSpherePlatformNodeNetworkingSpec + default: {} +- name: com.github.openshift.api.config.v1.VSpherePlatformNodeNetworkingSpec + map: + fields: + - name: excludeNetworkSubnetCidr + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: network + type: + scalar: string + - name: networkSubnetCidr + type: + list: + elementType: + scalar: string + elementRelationship: atomic - name: com.github.openshift.api.config.v1.VSpherePlatformSpec map: - elementType: - scalar: untyped - list: - elementType: - namedType: __untyped_atomic_ - elementRelationship: atomic - map: - elementType: - namedType: __untyped_deduced_ - elementRelationship: separable + fields: + - name: failureDomains + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.VSpherePlatformFailureDomainSpec + elementRelationship: atomic + - 
name: nodeNetworking + type: + namedType: com.github.openshift.api.config.v1.VSpherePlatformNodeNetworking + default: {} + - name: vcenters + type: + list: + elementType: + namedType: com.github.openshift.api.config.v1.VSpherePlatformVCenterSpec + elementRelationship: atomic - name: com.github.openshift.api.config.v1.VSpherePlatformStatus map: fields: @@ -2766,9 +3003,57 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: atomic + - name: loadBalancer + type: + namedType: com.github.openshift.api.config.v1.VSpherePlatformLoadBalancer + default: + type: OpenShiftManagedDefault - name: nodeDNSIP type: scalar: string +- name: com.github.openshift.api.config.v1.VSpherePlatformTopology + map: + fields: + - name: computeCluster + type: + scalar: string + default: "" + - name: datacenter + type: + scalar: string + default: "" + - name: datastore + type: + scalar: string + default: "" + - name: folder + type: + scalar: string + - name: networks + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: resourcePool + type: + scalar: string +- name: com.github.openshift.api.config.v1.VSpherePlatformVCenterSpec + map: + fields: + - name: datacenters + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: port + type: + scalar: numeric + - name: server + type: + scalar: string + default: "" - name: com.github.openshift.api.config.v1.WebhookTokenAuthenticator map: fields: @@ -2776,6 +3061,58 @@ var schemaYAML = typed.YAMLObject(`types: type: namedType: com.github.openshift.api.config.v1.SecretNameReference default: {} +- name: com.github.openshift.api.config.v1alpha1.GatherConfig + map: + fields: + - name: dataPolicy + type: + scalar: string + - name: disabledGatherers + type: + list: + elementType: + scalar: string + elementRelationship: atomic +- name: com.github.openshift.api.config.v1alpha1.InsightsDataGather + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.config.v1alpha1.InsightsDataGatherSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.config.v1alpha1.InsightsDataGatherStatus + default: {} +- name: com.github.openshift.api.config.v1alpha1.InsightsDataGatherSpec + map: + fields: + - name: gatherConfig + type: + namedType: com.github.openshift.api.config.v1alpha1.GatherConfig + default: {} +- name: com.github.openshift.api.config.v1alpha1.InsightsDataGatherStatus + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable - name: io.k8s.api.core.v1.ConfigMapKeySelector map: fields: @@ -2829,6 +3166,13 @@ var schemaYAML = typed.YAMLObject(`types: scalar: string default: "" elementRelationship: atomic +- name: io.k8s.api.core.v1.ResourceClaim + map: + fields: + - name: name + type: + scalar: string + default: "" - name: io.k8s.api.core.v1.ResourceFieldSelector map: fields: @@ -2847,6 +3191,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: io.k8s.api.core.v1.ResourceRequirements map: fields: + - name: claims + type: + list: + elementType: + namedType: io.k8s.api.core.v1.ResourceClaim + elementRelationship: associative + keys: + - name - name: limits type: map: diff --git 
a/vendor/github.com/openshift/client-go/config/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/clientset.go index 6a361b1f6..f2559671a 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/clientset.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/clientset.go @@ -7,6 +7,7 @@ import ( "net/http" configv1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1" + configv1alpha1 "github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1" discovery "k8s.io/client-go/discovery" rest "k8s.io/client-go/rest" flowcontrol "k8s.io/client-go/util/flowcontrol" @@ -15,13 +16,15 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface ConfigV1() configv1.ConfigV1Interface + ConfigV1alpha1() configv1alpha1.ConfigV1alpha1Interface } // Clientset contains the clients for groups. Each group has exactly one // version included in a Clientset. type Clientset struct { *discovery.DiscoveryClient - configV1 *configv1.ConfigV1Client + configV1 *configv1.ConfigV1Client + configV1alpha1 *configv1alpha1.ConfigV1alpha1Client } // ConfigV1 retrieves the ConfigV1Client @@ -29,6 +32,11 @@ func (c *Clientset) ConfigV1() configv1.ConfigV1Interface { return c.configV1 } +// ConfigV1alpha1 retrieves the ConfigV1alpha1Client +func (c *Clientset) ConfigV1alpha1() configv1alpha1.ConfigV1alpha1Interface { + return c.configV1alpha1 +} + // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { @@ -77,6 +85,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.configV1alpha1, err = configv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) if err != nil { @@ -99,6 +111,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { func New(c rest.Interface) *Clientset { var cs Clientset cs.configV1 = configv1.New(c) + cs.configV1alpha1 = configv1alpha1.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/scheme/register.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/scheme/register.go index 00d32306d..6340555dd 100644 --- a/vendor/github.com/openshift/client-go/config/clientset/versioned/scheme/register.go +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/scheme/register.go @@ -4,6 +4,7 @@ package scheme import ( configv1 "github.com/openshift/api/config/v1" + configv1alpha1 "github.com/openshift/api/config/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" schema "k8s.io/apimachinery/pkg/runtime/schema" @@ -16,19 +17,20 @@ var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ configv1.AddToScheme, + configv1alpha1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. 
This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/config_client.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/config_client.go new file mode 100644 index 000000000..d84833dd1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/config_client.go @@ -0,0 +1,91 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "net/http" + + v1alpha1 "github.com/openshift/api/config/v1alpha1" + "github.com/openshift/client-go/config/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type ConfigV1alpha1Interface interface { + RESTClient() rest.Interface + InsightsDataGathersGetter +} + +// ConfigV1alpha1Client is used to interact with features provided by the config.openshift.io group. +type ConfigV1alpha1Client struct { + restClient rest.Interface +} + +func (c *ConfigV1alpha1Client) InsightsDataGathers() InsightsDataGatherInterface { + return newInsightsDataGathers(c) +} + +// NewForConfig creates a new ConfigV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*ConfigV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new ConfigV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ConfigV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &ConfigV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new ConfigV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ConfigV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ConfigV1alpha1Client for the given RESTClient. 
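A sketch of wiring this new client group into a program, assuming a kubeconfig on disk (the path is illustrative; in-cluster callers would use rest.InClusterConfig instead):

package main

import (
	"log"

	configclient "github.com/openshift/client-go/config/clientset/versioned"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		log.Fatal(err)
	}
	client, err := configclient.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}
	// The v1alpha1 group now sits alongside the existing ConfigV1 group.
	_ = client.ConfigV1alpha1().InsightsDataGathers()
}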
+func New(c rest.Interface) *ConfigV1alpha1Client { + return &ConfigV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *ConfigV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/doc.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/doc.go new file mode 100644 index 000000000..93a7ca4e0 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/generated_expansion.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/generated_expansion.go new file mode 100644 index 000000000..c809c52fa --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/generated_expansion.go @@ -0,0 +1,5 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type InsightsDataGatherExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/insightsdatagather.go new file mode 100644 index 000000000..e3e66488a --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1/insightsdatagather.go @@ -0,0 +1,227 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1alpha1 "github.com/openshift/api/config/v1alpha1" + configv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1" + scheme "github.com/openshift/client-go/config/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// InsightsDataGathersGetter has a method to return a InsightsDataGatherInterface. +// A group's client should implement this interface. +type InsightsDataGathersGetter interface { + InsightsDataGathers() InsightsDataGatherInterface +} + +// InsightsDataGatherInterface has methods to work with InsightsDataGather resources. 
+type InsightsDataGatherInterface interface { + Create(ctx context.Context, insightsDataGather *v1alpha1.InsightsDataGather, opts v1.CreateOptions) (*v1alpha1.InsightsDataGather, error) + Update(ctx context.Context, insightsDataGather *v1alpha1.InsightsDataGather, opts v1.UpdateOptions) (*v1alpha1.InsightsDataGather, error) + UpdateStatus(ctx context.Context, insightsDataGather *v1alpha1.InsightsDataGather, opts v1.UpdateOptions) (*v1alpha1.InsightsDataGather, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.InsightsDataGather, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.InsightsDataGatherList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.InsightsDataGather, err error) + Apply(ctx context.Context, insightsDataGather *configv1alpha1.InsightsDataGatherApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.InsightsDataGather, err error) + ApplyStatus(ctx context.Context, insightsDataGather *configv1alpha1.InsightsDataGatherApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.InsightsDataGather, err error) + InsightsDataGatherExpansion +} + +// insightsDataGathers implements InsightsDataGatherInterface +type insightsDataGathers struct { + client rest.Interface +} + +// newInsightsDataGathers returns a InsightsDataGathers +func newInsightsDataGathers(c *ConfigV1alpha1Client) *insightsDataGathers { + return &insightsDataGathers{ + client: c.RESTClient(), + } +} + +// Get takes name of the insightsDataGather, and returns the corresponding insightsDataGather object, and an error if there is any. +func (c *insightsDataGathers) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.InsightsDataGather, err error) { + result = &v1alpha1.InsightsDataGather{} + err = c.client.Get(). + Resource("insightsdatagathers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of InsightsDataGathers that match those selectors. +func (c *insightsDataGathers) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.InsightsDataGatherList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.InsightsDataGatherList{} + err = c.client.Get(). + Resource("insightsdatagathers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested insightsDataGathers. +func (c *insightsDataGathers) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("insightsdatagathers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a insightsDataGather and creates it. Returns the server's representation of the insightsDataGather, and an error, if there is any. 
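A minimal read-path sketch against the interface above; the object name "cluster" is illustrative:

package example

import (
	"context"
	"fmt"

	configclient "github.com/openshift/client-go/config/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func readGathers(ctx context.Context, client configclient.Interface) error {
	one, err := client.ConfigV1alpha1().InsightsDataGathers().Get(ctx, "cluster", metav1.GetOptions{})
	if err != nil {
		return err
	}
	fmt.Println(one.Name)

	all, err := client.ConfigV1alpha1().InsightsDataGathers().List(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	fmt.Println(len(all.Items))
	return nil
}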
+func (c *insightsDataGathers) Create(ctx context.Context, insightsDataGather *v1alpha1.InsightsDataGather, opts v1.CreateOptions) (result *v1alpha1.InsightsDataGather, err error) { + result = &v1alpha1.InsightsDataGather{} + err = c.client.Post(). + Resource("insightsdatagathers"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(insightsDataGather). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a insightsDataGather and updates it. Returns the server's representation of the insightsDataGather, and an error, if there is any. +func (c *insightsDataGathers) Update(ctx context.Context, insightsDataGather *v1alpha1.InsightsDataGather, opts v1.UpdateOptions) (result *v1alpha1.InsightsDataGather, err error) { + result = &v1alpha1.InsightsDataGather{} + err = c.client.Put(). + Resource("insightsdatagathers"). + Name(insightsDataGather.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(insightsDataGather). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *insightsDataGathers) UpdateStatus(ctx context.Context, insightsDataGather *v1alpha1.InsightsDataGather, opts v1.UpdateOptions) (result *v1alpha1.InsightsDataGather, err error) { + result = &v1alpha1.InsightsDataGather{} + err = c.client.Put(). + Resource("insightsdatagathers"). + Name(insightsDataGather.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(insightsDataGather). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the insightsDataGather and deletes it. Returns an error if one occurs. +func (c *insightsDataGathers) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("insightsdatagathers"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *insightsDataGathers) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("insightsdatagathers"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched insightsDataGather. +func (c *insightsDataGathers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.InsightsDataGather, err error) { + result = &v1alpha1.InsightsDataGather{} + err = c.client.Patch(pt). + Resource("insightsdatagathers"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied insightsDataGather. 
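For the write paths above, a short Patch sketch; the JSON body and the dataPolicy value are illustrative:

package example

import (
	"context"

	configclient "github.com/openshift/client-go/config/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

func setDataPolicy(ctx context.Context, client configclient.Interface) error {
	// Merge-patch only the field we care about; other spec fields are left untouched.
	patch := []byte(`{"spec":{"gatherConfig":{"dataPolicy":"None"}}}`)
	_, err := client.ConfigV1alpha1().InsightsDataGathers().
		Patch(ctx, "cluster", types.MergePatchType, patch, metav1.PatchOptions{})
	return err
}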
+func (c *insightsDataGathers) Apply(ctx context.Context, insightsDataGather *configv1alpha1.InsightsDataGatherApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.InsightsDataGather, err error) { + if insightsDataGather == nil { + return nil, fmt.Errorf("insightsDataGather provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(insightsDataGather) + if err != nil { + return nil, err + } + name := insightsDataGather.Name + if name == nil { + return nil, fmt.Errorf("insightsDataGather.Name must be provided to Apply") + } + result = &v1alpha1.InsightsDataGather{} + err = c.client.Patch(types.ApplyPatchType). + Resource("insightsdatagathers"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *insightsDataGathers) ApplyStatus(ctx context.Context, insightsDataGather *configv1alpha1.InsightsDataGatherApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.InsightsDataGather, err error) { + if insightsDataGather == nil { + return nil, fmt.Errorf("insightsDataGather provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(insightsDataGather) + if err != nil { + return nil, err + } + + name := insightsDataGather.Name + if name == nil { + return nil, fmt.Errorf("insightsDataGather.Name must be provided to Apply") + } + + result = &v1alpha1.InsightsDataGather{} + err = c.client.Patch(types.ApplyPatchType). + Resource("insightsdatagathers"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/interface.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/interface.go index 544faaaea..3e7e6e8d3 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/interface.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/interface.go @@ -4,6 +4,7 @@ package config import ( v1 "github.com/openshift/client-go/config/informers/externalversions/config/v1" + v1alpha1 "github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1" internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" ) @@ -11,6 +12,8 @@ import ( type Interface interface { // V1 provides access to shared informers for resources in V1. V1() v1.Interface + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface } type group struct { @@ -28,3 +31,8 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList func (g *group) V1() v1.Interface { return v1.New(g.factory, g.namespace, g.tweakListOptions) } + +// V1alpha1 returns a new v1alpha1.Interface. 
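Apply and ApplyStatus above implement server-side apply by marshalling the apply configuration and sending it as an ApplyPatchType patch. A hedged sketch of invoking Apply; it assumes the generated constructor configv1alpha1.InsightsDataGather(name) in the vendored applyconfigurations package, and the field-manager name and annotation are illustrative only.

package example

import (
	"context"

	configv1alpha1 "github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1"
	configclient "github.com/openshift/client-go/config/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func applyInsightsDataGather(client configclient.Interface) error {
	// The generated constructor pre-populates name, kind, and apiVersion,
	// which the Apply implementation above requires (it errors on a nil name).
	applyCfg := configv1alpha1.InsightsDataGather("cluster").
		WithAnnotations(map[string]string{"example.openshift.io/note": "server-side-applied"})
	_, err := client.ConfigV1alpha1().InsightsDataGathers().Apply(
		context.TODO(), applyCfg,
		metav1.ApplyOptions{FieldManager: "example-manager", Force: true},
	)
	return err
}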
+func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/insightsdatagather.go new file mode 100644 index 000000000..22a41d363 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/insightsdatagather.go @@ -0,0 +1,73 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + configv1alpha1 "github.com/openshift/api/config/v1alpha1" + versioned "github.com/openshift/client-go/config/clientset/versioned" + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" + v1alpha1 "github.com/openshift/client-go/config/listers/config/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" +) + +// InsightsDataGatherInformer provides access to a shared informer and lister for +// InsightsDataGathers. +type InsightsDataGatherInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.InsightsDataGatherLister +} + +type insightsDataGatherInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewInsightsDataGatherInformer constructs a new informer for InsightsDataGather type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewInsightsDataGatherInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredInsightsDataGatherInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredInsightsDataGatherInformer constructs a new informer for InsightsDataGather type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredInsightsDataGatherInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1alpha1().InsightsDataGathers().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ConfigV1alpha1().InsightsDataGathers().Watch(context.TODO(), options) + }, + }, + &configv1alpha1.InsightsDataGather{}, + resyncPeriod, + indexers, + ) +} + +func (f *insightsDataGatherInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredInsightsDataGatherInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *insightsDataGatherInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&configv1alpha1.InsightsDataGather{}, f.defaultInformer) +} + +func (f *insightsDataGatherInformer) Lister() v1alpha1.InsightsDataGatherLister { + return v1alpha1.NewInsightsDataGatherLister(f.Informer().GetIndexer()) +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/interface.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/interface.go new file mode 100644 index 000000000..b511e60ef --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1/interface.go @@ -0,0 +1,29 @@ +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "github.com/openshift/client-go/config/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // InsightsDataGathers returns a InsightsDataGatherInformer. + InsightsDataGathers() InsightsDataGatherInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// InsightsDataGathers returns a InsightsDataGatherInformer. 
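NewFilteredInsightsDataGatherInformer wires List and Watch through the typed client; as the generator comments note, the shared factory is the intended entry point so informers and connections are shared. A short sketch of the usual wiring, assuming the factory's group accessor is Config() per the generated externalversions package; the resync period and handler bodies are illustrative.

package example

import (
	"time"

	configclient "github.com/openshift/client-go/config/clientset/versioned"
	configinformers "github.com/openshift/client-go/config/informers/externalversions"
	"k8s.io/client-go/tools/cache"
)

func watchInsightsDataGathers(client configclient.Interface, stopCh <-chan struct{}) {
	factory := configinformers.NewSharedInformerFactory(client, 10*time.Minute)
	informer := factory.Config().V1alpha1().InsightsDataGathers().Informer()
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    func(obj interface{}) { /* handle add */ },
		UpdateFunc: func(oldObj, newObj interface{}) { /* handle update */ },
	})
	factory.Start(stopCh)
	cache.WaitForCacheSync(stopCh, informer.HasSynced)

	// Reads are then served from the shared cache via the lister rather than
	// hitting the API server; cluster-scoped, so Get takes a bare name.
	if idg, err := factory.Config().V1alpha1().InsightsDataGathers().Lister().Get("cluster"); err == nil {
		_ = idg
	}
}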
+func (v *version) InsightsDataGathers() InsightsDataGatherInformer { + return &insightsDataGatherInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go b/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go index a9250c408..868af7dc8 100644 --- a/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go +++ b/vendor/github.com/openshift/client-go/config/informers/externalversions/generic.go @@ -6,6 +6,7 @@ import ( "fmt" v1 "github.com/openshift/api/config/v1" + v1alpha1 "github.com/openshift/api/config/v1alpha1" schema "k8s.io/apimachinery/pkg/runtime/schema" cache "k8s.io/client-go/tools/cache" ) @@ -80,6 +81,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case v1.SchemeGroupVersion.WithResource("schedulers"): return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1().Schedulers().Informer()}, nil + // Group=config.openshift.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("insightsdatagathers"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Config().V1alpha1().InsightsDataGathers().Informer()}, nil + } return nil, fmt.Errorf("no informer found for %v", resource) diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/expansion_generated.go b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/expansion_generated.go new file mode 100644 index 000000000..efdc4fbef --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/expansion_generated.go @@ -0,0 +1,7 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// InsightsDataGatherListerExpansion allows custom methods to be added to +// InsightsDataGatherLister. +type InsightsDataGatherListerExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/insightsdatagather.go b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/insightsdatagather.go new file mode 100644 index 000000000..887f066e4 --- /dev/null +++ b/vendor/github.com/openshift/client-go/config/listers/config/v1alpha1/insightsdatagather.go @@ -0,0 +1,52 @@ +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "github.com/openshift/api/config/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// InsightsDataGatherLister helps list InsightsDataGathers. +// All objects returned here must be treated as read-only. +type InsightsDataGatherLister interface { + // List lists all InsightsDataGathers in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.InsightsDataGather, err error) + // Get retrieves the InsightsDataGather from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.InsightsDataGather, error) + InsightsDataGatherListerExpansion +} + +// insightsDataGatherLister implements the InsightsDataGatherLister interface. +type insightsDataGatherLister struct { + indexer cache.Indexer +} + +// NewInsightsDataGatherLister returns a new InsightsDataGatherLister. 
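The generic.go hunk above registers insightsdatagathers with the factory's ForResource dispatch, so dynamic callers can resolve an informer from a GroupVersionResource alone, without knowing the typed accessor. A brief sketch; error handling is kept minimal.

package example

import (
	configv1alpha1 "github.com/openshift/api/config/v1alpha1"
	configinformers "github.com/openshift/client-go/config/informers/externalversions"
)

func genericInsightsInformer(factory configinformers.SharedInformerFactory) error {
	gvr := configv1alpha1.SchemeGroupVersion.WithResource("insightsdatagathers")
	gi, err := factory.ForResource(gvr)
	if err != nil {
		return err
	}
	// GenericInformer exposes an untyped shared informer and lister.
	_ = gi.Informer()
	_ = gi.Lister()
	return nil
}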
+func NewInsightsDataGatherLister(indexer cache.Indexer) InsightsDataGatherLister { + return &insightsDataGatherLister{indexer: indexer} +} + +// List lists all InsightsDataGathers in the indexer. +func (s *insightsDataGatherLister) List(selector labels.Selector) (ret []*v1alpha1.InsightsDataGather, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.InsightsDataGather)) + }) + return ret, err +} + +// Get retrieves the InsightsDataGather from the index for a given name. +func (s *insightsDataGatherLister) Get(name string) (*v1alpha1.InsightsDataGather, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("insightsdatagather"), name) + } + return obj.(*v1alpha1.InsightsDataGather), nil +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/image.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/image.go new file mode 100644 index 000000000..a18bea315 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/image.go @@ -0,0 +1,330 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + apiimagev1 "github.com/openshift/api/image/v1" + internal "github.com/openshift/client-go/image/applyconfigurations/internal" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ImageApplyConfiguration represents an declarative configuration of the Image type for use +// with apply. +type ImageApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + DockerImageReference *string `json:"dockerImageReference,omitempty"` + DockerImageMetadata *runtime.RawExtension `json:"dockerImageMetadata,omitempty"` + DockerImageMetadataVersion *string `json:"dockerImageMetadataVersion,omitempty"` + DockerImageManifest *string `json:"dockerImageManifest,omitempty"` + DockerImageLayers []ImageLayerApplyConfiguration `json:"dockerImageLayers,omitempty"` + Signatures []ImageSignatureApplyConfiguration `json:"signatures,omitempty"` + DockerImageSignatures [][]byte `json:"dockerImageSignatures,omitempty"` + DockerImageManifestMediaType *string `json:"dockerImageManifestMediaType,omitempty"` + DockerImageConfig *string `json:"dockerImageConfig,omitempty"` + DockerImageManifests []ImageManifestApplyConfiguration `json:"dockerImageManifests,omitempty"` +} + +// Image constructs an declarative configuration of the Image type for use with +// apply. +func Image(name string) *ImageApplyConfiguration { + b := &ImageApplyConfiguration{} + b.WithName(name) + b.WithKind("Image") + b.WithAPIVersion("image.openshift.io/v1") + return b +} + +// ExtractImage extracts the applied configuration owned by fieldManager from +// image. If no managedFields are found in image for fieldManager, a +// ImageApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. 
+// image must be a unmodified Image API object that was retrieved from the Kubernetes API. +// ExtractImage provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractImage(image *apiimagev1.Image, fieldManager string) (*ImageApplyConfiguration, error) { + return extractImage(image, fieldManager, "") +} + +// ExtractImageStatus is the same as ExtractImage except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractImageStatus(image *apiimagev1.Image, fieldManager string) (*ImageApplyConfiguration, error) { + return extractImage(image, fieldManager, "status") +} + +func extractImage(image *apiimagev1.Image, fieldManager string, subresource string) (*ImageApplyConfiguration, error) { + b := &ImageApplyConfiguration{} + err := managedfields.ExtractInto(image, internal.Parser().Type("com.github.openshift.api.image.v1.Image"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(image.Name) + + b.WithKind("Image") + b.WithAPIVersion("image.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithKind(value string) *ImageApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithAPIVersion(value string) *ImageApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithName(value string) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithGenerateName(value string) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. 
+func (b *ImageApplyConfiguration) WithNamespace(value string) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithUID(value types.UID) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithResourceVersion(value string) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithGeneration(value int64) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. 
+func (b *ImageApplyConfiguration) WithLabels(entries map[string]string) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ImageApplyConfiguration) WithAnnotations(entries map[string]string) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ImageApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ImageApplyConfiguration) WithFinalizers(values ...string) *ImageApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ImageApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithDockerImageReference sets the DockerImageReference field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DockerImageReference field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithDockerImageReference(value string) *ImageApplyConfiguration { + b.DockerImageReference = &value + return b +} + +// WithDockerImageMetadata sets the DockerImageMetadata field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DockerImageMetadata field is set to the value of the last call. 
+func (b *ImageApplyConfiguration) WithDockerImageMetadata(value runtime.RawExtension) *ImageApplyConfiguration { + b.DockerImageMetadata = &value + return b +} + +// WithDockerImageMetadataVersion sets the DockerImageMetadataVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DockerImageMetadataVersion field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithDockerImageMetadataVersion(value string) *ImageApplyConfiguration { + b.DockerImageMetadataVersion = &value + return b +} + +// WithDockerImageManifest sets the DockerImageManifest field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DockerImageManifest field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithDockerImageManifest(value string) *ImageApplyConfiguration { + b.DockerImageManifest = &value + return b +} + +// WithDockerImageLayers adds the given value to the DockerImageLayers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the DockerImageLayers field. +func (b *ImageApplyConfiguration) WithDockerImageLayers(values ...*ImageLayerApplyConfiguration) *ImageApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithDockerImageLayers") + } + b.DockerImageLayers = append(b.DockerImageLayers, *values[i]) + } + return b +} + +// WithSignatures adds the given value to the Signatures field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Signatures field. +func (b *ImageApplyConfiguration) WithSignatures(values ...*ImageSignatureApplyConfiguration) *ImageApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithSignatures") + } + b.Signatures = append(b.Signatures, *values[i]) + } + return b +} + +// WithDockerImageSignatures adds the given value to the DockerImageSignatures field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the DockerImageSignatures field. +func (b *ImageApplyConfiguration) WithDockerImageSignatures(values ...[]byte) *ImageApplyConfiguration { + for i := range values { + b.DockerImageSignatures = append(b.DockerImageSignatures, values[i]) + } + return b +} + +// WithDockerImageManifestMediaType sets the DockerImageManifestMediaType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DockerImageManifestMediaType field is set to the value of the last call. 
+func (b *ImageApplyConfiguration) WithDockerImageManifestMediaType(value string) *ImageApplyConfiguration { + b.DockerImageManifestMediaType = &value + return b +} + +// WithDockerImageConfig sets the DockerImageConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DockerImageConfig field is set to the value of the last call. +func (b *ImageApplyConfiguration) WithDockerImageConfig(value string) *ImageApplyConfiguration { + b.DockerImageConfig = &value + return b +} + +// WithDockerImageManifests adds the given value to the DockerImageManifests field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the DockerImageManifests field. +func (b *ImageApplyConfiguration) WithDockerImageManifests(values ...*ImageManifestApplyConfiguration) *ImageApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithDockerImageManifests") + } + b.DockerImageManifests = append(b.DockerImageManifests, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagelayer.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagelayer.go new file mode 100644 index 000000000..e03e79a31 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagelayer.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ImageLayerApplyConfiguration represents an declarative configuration of the ImageLayer type for use +// with apply. +type ImageLayerApplyConfiguration struct { + Name *string `json:"name,omitempty"` + LayerSize *int64 `json:"size,omitempty"` + MediaType *string `json:"mediaType,omitempty"` +} + +// ImageLayerApplyConfiguration constructs an declarative configuration of the ImageLayer type for use with +// apply. +func ImageLayer() *ImageLayerApplyConfiguration { + return &ImageLayerApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ImageLayerApplyConfiguration) WithName(value string) *ImageLayerApplyConfiguration { + b.Name = &value + return b +} + +// WithLayerSize sets the LayerSize field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LayerSize field is set to the value of the last call. +func (b *ImageLayerApplyConfiguration) WithLayerSize(value int64) *ImageLayerApplyConfiguration { + b.LayerSize = &value + return b +} + +// WithMediaType sets the MediaType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MediaType field is set to the value of the last call. 
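The Image and ImageLayer builders compose: nested apply configurations are passed by pointer and dereferenced on append, which is why the variadic WithDockerImageLayers takes pointers and panics on nil. An illustrative sketch of composing them; every field value below is a made-up placeholder.

package example

import (
	imagev1 "github.com/openshift/client-go/image/applyconfigurations/image/v1"
)

func exampleImageApplyConfiguration() *imagev1.ImageApplyConfiguration {
	// All names, references, and sizes here are illustrative placeholders.
	return imagev1.Image("example-image").
		WithDockerImageReference("quay.io/example/app:latest").
		WithDockerImageManifestMediaType("application/vnd.docker.distribution.manifest.v2+json").
		WithDockerImageLayers(
			imagev1.ImageLayer().
				WithName("sha256:0000000000000000000000000000000000000000000000000000000000000000").
				WithLayerSize(1024).
				WithMediaType("application/vnd.docker.image.rootfs.diff.tar.gzip"),
		)
}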
+func (b *ImageLayerApplyConfiguration) WithMediaType(value string) *ImageLayerApplyConfiguration { + b.MediaType = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagelookuppolicy.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagelookuppolicy.go new file mode 100644 index 000000000..db4336f30 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagelookuppolicy.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ImageLookupPolicyApplyConfiguration represents an declarative configuration of the ImageLookupPolicy type for use +// with apply. +type ImageLookupPolicyApplyConfiguration struct { + Local *bool `json:"local,omitempty"` +} + +// ImageLookupPolicyApplyConfiguration constructs an declarative configuration of the ImageLookupPolicy type for use with +// apply. +func ImageLookupPolicy() *ImageLookupPolicyApplyConfiguration { + return &ImageLookupPolicyApplyConfiguration{} +} + +// WithLocal sets the Local field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Local field is set to the value of the last call. +func (b *ImageLookupPolicyApplyConfiguration) WithLocal(value bool) *ImageLookupPolicyApplyConfiguration { + b.Local = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagemanifest.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagemanifest.go new file mode 100644 index 000000000..34f0ac8ee --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagemanifest.go @@ -0,0 +1,68 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ImageManifestApplyConfiguration represents an declarative configuration of the ImageManifest type for use +// with apply. +type ImageManifestApplyConfiguration struct { + Digest *string `json:"digest,omitempty"` + MediaType *string `json:"mediaType,omitempty"` + ManifestSize *int64 `json:"manifestSize,omitempty"` + Architecture *string `json:"architecture,omitempty"` + OS *string `json:"os,omitempty"` + Variant *string `json:"variant,omitempty"` +} + +// ImageManifestApplyConfiguration constructs an declarative configuration of the ImageManifest type for use with +// apply. +func ImageManifest() *ImageManifestApplyConfiguration { + return &ImageManifestApplyConfiguration{} +} + +// WithDigest sets the Digest field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Digest field is set to the value of the last call. +func (b *ImageManifestApplyConfiguration) WithDigest(value string) *ImageManifestApplyConfiguration { + b.Digest = &value + return b +} + +// WithMediaType sets the MediaType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MediaType field is set to the value of the last call. 
+func (b *ImageManifestApplyConfiguration) WithMediaType(value string) *ImageManifestApplyConfiguration { + b.MediaType = &value + return b +} + +// WithManifestSize sets the ManifestSize field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ManifestSize field is set to the value of the last call. +func (b *ImageManifestApplyConfiguration) WithManifestSize(value int64) *ImageManifestApplyConfiguration { + b.ManifestSize = &value + return b +} + +// WithArchitecture sets the Architecture field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Architecture field is set to the value of the last call. +func (b *ImageManifestApplyConfiguration) WithArchitecture(value string) *ImageManifestApplyConfiguration { + b.Architecture = &value + return b +} + +// WithOS sets the OS field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the OS field is set to the value of the last call. +func (b *ImageManifestApplyConfiguration) WithOS(value string) *ImageManifestApplyConfiguration { + b.OS = &value + return b +} + +// WithVariant sets the Variant field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Variant field is set to the value of the last call. +func (b *ImageManifestApplyConfiguration) WithVariant(value string) *ImageManifestApplyConfiguration { + b.Variant = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagesignature.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagesignature.go new file mode 100644 index 000000000..f3995b240 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagesignature.go @@ -0,0 +1,269 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ImageSignatureApplyConfiguration represents an declarative configuration of the ImageSignature type for use +// with apply. +type ImageSignatureApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Type *string `json:"type,omitempty"` + Content []byte `json:"content,omitempty"` + Conditions []SignatureConditionApplyConfiguration `json:"conditions,omitempty"` + ImageIdentity *string `json:"imageIdentity,omitempty"` + SignedClaims map[string]string `json:"signedClaims,omitempty"` + Created *metav1.Time `json:"created,omitempty"` + IssuedBy *SignatureIssuerApplyConfiguration `json:"issuedBy,omitempty"` + IssuedTo *SignatureSubjectApplyConfiguration `json:"issuedTo,omitempty"` +} + +// ImageSignature constructs an declarative configuration of the ImageSignature type for use with +// apply. 
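ImageManifest carries the per-platform entries of a manifest list, and its setters chain the same way as the other builders. A small sketch of populating one entry for use with WithDockerImageManifests; the digest is a placeholder.

package example

import (
	imagev1 "github.com/openshift/client-go/image/applyconfigurations/image/v1"
)

func exampleManifestEntry() *imagev1.ImageManifestApplyConfiguration {
	// Placeholder digest; real values come from the registry.
	return imagev1.ImageManifest().
		WithDigest("sha256:1111111111111111111111111111111111111111111111111111111111111111").
		WithMediaType("application/vnd.oci.image.manifest.v1+json").
		WithArchitecture("amd64").
		WithOS("linux")
}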
+func ImageSignature(name string) *ImageSignatureApplyConfiguration { + b := &ImageSignatureApplyConfiguration{} + b.WithName(name) + b.WithKind("ImageSignature") + b.WithAPIVersion("image.openshift.io/v1") + return b +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithKind(value string) *ImageSignatureApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithAPIVersion(value string) *ImageSignatureApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithName(value string) *ImageSignatureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithGenerateName(value string) *ImageSignatureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithNamespace(value string) *ImageSignatureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithUID(value types.UID) *ImageSignatureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. 
+func (b *ImageSignatureApplyConfiguration) WithResourceVersion(value string) *ImageSignatureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithGeneration(value int64) *ImageSignatureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ImageSignatureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ImageSignatureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImageSignatureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ImageSignatureApplyConfiguration) WithLabels(entries map[string]string) *ImageSignatureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. 
+func (b *ImageSignatureApplyConfiguration) WithAnnotations(entries map[string]string) *ImageSignatureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. +func (b *ImageSignatureApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ImageSignatureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ImageSignatureApplyConfiguration) WithFinalizers(values ...string) *ImageSignatureApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ImageSignatureApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithType(value string) *ImageSignatureApplyConfiguration { + b.Type = &value + return b +} + +// WithContent adds the given value to the Content field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Content field. +func (b *ImageSignatureApplyConfiguration) WithContent(values ...byte) *ImageSignatureApplyConfiguration { + for i := range values { + b.Content = append(b.Content, values[i]) + } + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. +func (b *ImageSignatureApplyConfiguration) WithConditions(values ...*SignatureConditionApplyConfiguration) *ImageSignatureApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} + +// WithImageIdentity sets the ImageIdentity field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the ImageIdentity field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithImageIdentity(value string) *ImageSignatureApplyConfiguration { + b.ImageIdentity = &value + return b +} + +// WithSignedClaims puts the entries into the SignedClaims field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the SignedClaims field, +// overwriting an existing map entries in SignedClaims field with the same key. +func (b *ImageSignatureApplyConfiguration) WithSignedClaims(entries map[string]string) *ImageSignatureApplyConfiguration { + if b.SignedClaims == nil && len(entries) > 0 { + b.SignedClaims = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.SignedClaims[k] = v + } + return b +} + +// WithCreated sets the Created field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Created field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithCreated(value metav1.Time) *ImageSignatureApplyConfiguration { + b.Created = &value + return b +} + +// WithIssuedBy sets the IssuedBy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IssuedBy field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithIssuedBy(value *SignatureIssuerApplyConfiguration) *ImageSignatureApplyConfiguration { + b.IssuedBy = value + return b +} + +// WithIssuedTo sets the IssuedTo field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IssuedTo field is set to the value of the last call. +func (b *ImageSignatureApplyConfiguration) WithIssuedTo(value *SignatureSubjectApplyConfiguration) *ImageSignatureApplyConfiguration { + b.IssuedTo = value + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestream.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestream.go new file mode 100644 index 000000000..0d72f0cf8 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestream.go @@ -0,0 +1,242 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + apiimagev1 "github.com/openshift/api/image/v1" + internal "github.com/openshift/client-go/image/applyconfigurations/internal" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ImageStreamApplyConfiguration represents an declarative configuration of the ImageStream type for use +// with apply. 
+type ImageStreamApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ImageStreamSpecApplyConfiguration `json:"spec,omitempty"` + Status *ImageStreamStatusApplyConfiguration `json:"status,omitempty"` +} + +// ImageStream constructs an declarative configuration of the ImageStream type for use with +// apply. +func ImageStream(name, namespace string) *ImageStreamApplyConfiguration { + b := &ImageStreamApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("ImageStream") + b.WithAPIVersion("image.openshift.io/v1") + return b +} + +// ExtractImageStream extracts the applied configuration owned by fieldManager from +// imageStream. If no managedFields are found in imageStream for fieldManager, a +// ImageStreamApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// imageStream must be a unmodified ImageStream API object that was retrieved from the Kubernetes API. +// ExtractImageStream provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractImageStream(imageStream *apiimagev1.ImageStream, fieldManager string) (*ImageStreamApplyConfiguration, error) { + return extractImageStream(imageStream, fieldManager, "") +} + +// ExtractImageStreamStatus is the same as ExtractImageStream except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractImageStreamStatus(imageStream *apiimagev1.ImageStream, fieldManager string) (*ImageStreamApplyConfiguration, error) { + return extractImageStream(imageStream, fieldManager, "status") +} + +func extractImageStream(imageStream *apiimagev1.ImageStream, fieldManager string, subresource string) (*ImageStreamApplyConfiguration, error) { + b := &ImageStreamApplyConfiguration{} + err := managedfields.ExtractInto(imageStream, internal.Parser().Type("com.github.openshift.api.image.v1.ImageStream"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(imageStream.Name) + b.WithNamespace(imageStream.Namespace) + + b.WithKind("ImageStream") + b.WithAPIVersion("image.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ImageStreamApplyConfiguration) WithKind(value string) *ImageStreamApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. 
+func (b *ImageStreamApplyConfiguration) WithAPIVersion(value string) *ImageStreamApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ImageStreamApplyConfiguration) WithName(value string) *ImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ImageStreamApplyConfiguration) WithGenerateName(value string) *ImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ImageStreamApplyConfiguration) WithNamespace(value string) *ImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ImageStreamApplyConfiguration) WithUID(value types.UID) *ImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ImageStreamApplyConfiguration) WithResourceVersion(value string) *ImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ImageStreamApplyConfiguration) WithGeneration(value int64) *ImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. 
+func (b *ImageStreamApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ImageStreamApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ImageStreamApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ImageStreamApplyConfiguration) WithLabels(entries map[string]string) *ImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ImageStreamApplyConfiguration) WithAnnotations(entries map[string]string) *ImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
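Note the asymmetry spelled out in the comments above: map setters such as WithLabels and WithAnnotations merge entries across calls, while scalar setters keep only the last value. A small sketch, reusing the illustrative imagev1ac alias from the earlier example:

    import imagev1ac "github.com/openshift/client-go/image/applyconfigurations/image/v1"

    func buildMeta() *imagev1ac.ImageStreamApplyConfiguration {
        b := imagev1ac.ImageStream("sample", "demo").
            WithLabels(map[string]string{"app": "demo"}).
            WithLabels(map[string]string{"tier": "backend"}) // merged with, not replacing, the first call
        b.WithGeneration(1)
        b.WithGeneration(2) // scalar setter: Generation ends up as 2
        return b
    }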
+func (b *ImageStreamApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ImageStreamApplyConfiguration) WithFinalizers(values ...string) *ImageStreamApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ImageStreamApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ImageStreamApplyConfiguration) WithSpec(value *ImageStreamSpecApplyConfiguration) *ImageStreamApplyConfiguration { + b.Spec = value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *ImageStreamApplyConfiguration) WithStatus(value *ImageStreamStatusApplyConfiguration) *ImageStreamApplyConfiguration { + b.Status = value + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreammapping.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreammapping.go new file mode 100644 index 000000000..627b657f1 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreammapping.go @@ -0,0 +1,242 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + apiimagev1 "github.com/openshift/api/image/v1" + internal "github.com/openshift/client-go/image/applyconfigurations/internal" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ImageStreamMappingApplyConfiguration represents an declarative configuration of the ImageStreamMapping type for use +// with apply. +type ImageStreamMappingApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Image *ImageApplyConfiguration `json:"image,omitempty"` + Tag *string `json:"tag,omitempty"` +} + +// ImageStreamMapping constructs an declarative configuration of the ImageStreamMapping type for use with +// apply. 
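Because server-side apply creates the object when it does not already exist, the completed ImageStream builder above also serves as a declarative "create or update". A sketch that continues the earlier example's imports and cs/ctx variables, assuming the package's ImageLookupPolicy builder (referenced by ImageStreamSpecApplyConfiguration above):

    ac := imagev1ac.ImageStream("sample", "demo").
        WithSpec(imagev1ac.ImageStreamSpec().
            WithLookupPolicy(imagev1ac.ImageLookupPolicy().WithLocal(true)))
    if _, err := cs.ImageV1().ImageStreams("demo").Apply(ctx, ac,
        metav1.ApplyOptions{FieldManager: "sriov-e2e"}); err != nil {
        return err
    }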
+func ImageStreamMapping(name, namespace string) *ImageStreamMappingApplyConfiguration { + b := &ImageStreamMappingApplyConfiguration{} + b.WithName(name) + b.WithNamespace(namespace) + b.WithKind("ImageStreamMapping") + b.WithAPIVersion("image.openshift.io/v1") + return b +} + +// ExtractImageStreamMapping extracts the applied configuration owned by fieldManager from +// imageStreamMapping. If no managedFields are found in imageStreamMapping for fieldManager, a +// ImageStreamMappingApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// imageStreamMapping must be a unmodified ImageStreamMapping API object that was retrieved from the Kubernetes API. +// ExtractImageStreamMapping provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractImageStreamMapping(imageStreamMapping *apiimagev1.ImageStreamMapping, fieldManager string) (*ImageStreamMappingApplyConfiguration, error) { + return extractImageStreamMapping(imageStreamMapping, fieldManager, "") +} + +// ExtractImageStreamMappingStatus is the same as ExtractImageStreamMapping except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractImageStreamMappingStatus(imageStreamMapping *apiimagev1.ImageStreamMapping, fieldManager string) (*ImageStreamMappingApplyConfiguration, error) { + return extractImageStreamMapping(imageStreamMapping, fieldManager, "status") +} + +func extractImageStreamMapping(imageStreamMapping *apiimagev1.ImageStreamMapping, fieldManager string, subresource string) (*ImageStreamMappingApplyConfiguration, error) { + b := &ImageStreamMappingApplyConfiguration{} + err := managedfields.ExtractInto(imageStreamMapping, internal.Parser().Type("com.github.openshift.api.image.v1.ImageStreamMapping"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(imageStreamMapping.Name) + b.WithNamespace(imageStreamMapping.Namespace) + + b.WithKind("ImageStreamMapping") + b.WithAPIVersion("image.openshift.io/v1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. +func (b *ImageStreamMappingApplyConfiguration) WithKind(value string) *ImageStreamMappingApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. 
+func (b *ImageStreamMappingApplyConfiguration) WithAPIVersion(value string) *ImageStreamMappingApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ImageStreamMappingApplyConfiguration) WithName(value string) *ImageStreamMappingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ImageStreamMappingApplyConfiguration) WithGenerateName(value string) *ImageStreamMappingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ImageStreamMappingApplyConfiguration) WithNamespace(value string) *ImageStreamMappingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ImageStreamMappingApplyConfiguration) WithUID(value types.UID) *ImageStreamMappingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ImageStreamMappingApplyConfiguration) WithResourceVersion(value string) *ImageStreamMappingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *ImageStreamMappingApplyConfiguration) WithGeneration(value int64) *ImageStreamMappingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. 
+func (b *ImageStreamMappingApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ImageStreamMappingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ImageStreamMappingApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ImageStreamMappingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ImageStreamMappingApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ImageStreamMappingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ImageStreamMappingApplyConfiguration) WithLabels(entries map[string]string) *ImageStreamMappingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ImageStreamMappingApplyConfiguration) WithAnnotations(entries map[string]string) *ImageStreamMappingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
+func (b *ImageStreamMappingApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ImageStreamMappingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ImageStreamMappingApplyConfiguration) WithFinalizers(values ...string) *ImageStreamMappingApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ImageStreamMappingApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithImage sets the Image field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Image field is set to the value of the last call. +func (b *ImageStreamMappingApplyConfiguration) WithImage(value *ImageApplyConfiguration) *ImageStreamMappingApplyConfiguration { + b.Image = value + return b +} + +// WithTag sets the Tag field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Tag field is set to the value of the last call. +func (b *ImageStreamMappingApplyConfiguration) WithTag(value string) *ImageStreamMappingApplyConfiguration { + b.Tag = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreamspec.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreamspec.go new file mode 100644 index 000000000..5239862b7 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreamspec.go @@ -0,0 +1,46 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ImageStreamSpecApplyConfiguration represents an declarative configuration of the ImageStreamSpec type for use +// with apply. +type ImageStreamSpecApplyConfiguration struct { + LookupPolicy *ImageLookupPolicyApplyConfiguration `json:"lookupPolicy,omitempty"` + DockerImageRepository *string `json:"dockerImageRepository,omitempty"` + Tags []TagReferenceApplyConfiguration `json:"tags,omitempty"` +} + +// ImageStreamSpecApplyConfiguration constructs an declarative configuration of the ImageStreamSpec type for use with +// apply. +func ImageStreamSpec() *ImageStreamSpecApplyConfiguration { + return &ImageStreamSpecApplyConfiguration{} +} + +// WithLookupPolicy sets the LookupPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LookupPolicy field is set to the value of the last call. 
+func (b *ImageStreamSpecApplyConfiguration) WithLookupPolicy(value *ImageLookupPolicyApplyConfiguration) *ImageStreamSpecApplyConfiguration { + b.LookupPolicy = value + return b +} + +// WithDockerImageRepository sets the DockerImageRepository field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DockerImageRepository field is set to the value of the last call. +func (b *ImageStreamSpecApplyConfiguration) WithDockerImageRepository(value string) *ImageStreamSpecApplyConfiguration { + b.DockerImageRepository = &value + return b +} + +// WithTags adds the given value to the Tags field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Tags field. +func (b *ImageStreamSpecApplyConfiguration) WithTags(values ...*TagReferenceApplyConfiguration) *ImageStreamSpecApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithTags") + } + b.Tags = append(b.Tags, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreamstatus.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreamstatus.go new file mode 100644 index 000000000..4035e7787 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/imagestreamstatus.go @@ -0,0 +1,46 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ImageStreamStatusApplyConfiguration represents an declarative configuration of the ImageStreamStatus type for use +// with apply. +type ImageStreamStatusApplyConfiguration struct { + DockerImageRepository *string `json:"dockerImageRepository,omitempty"` + PublicDockerImageRepository *string `json:"publicDockerImageRepository,omitempty"` + Tags []NamedTagEventListApplyConfiguration `json:"tags,omitempty"` +} + +// ImageStreamStatusApplyConfiguration constructs an declarative configuration of the ImageStreamStatus type for use with +// apply. +func ImageStreamStatus() *ImageStreamStatusApplyConfiguration { + return &ImageStreamStatusApplyConfiguration{} +} + +// WithDockerImageRepository sets the DockerImageRepository field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DockerImageRepository field is set to the value of the last call. +func (b *ImageStreamStatusApplyConfiguration) WithDockerImageRepository(value string) *ImageStreamStatusApplyConfiguration { + b.DockerImageRepository = &value + return b +} + +// WithPublicDockerImageRepository sets the PublicDockerImageRepository field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PublicDockerImageRepository field is set to the value of the last call. 
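Unlike the scalar setters, the variadic appenders accumulate across calls and reject nil pointers outright, as the WithTags body above shows. A short sketch with illustrative tag names:

    import imagev1ac "github.com/openshift/client-go/image/applyconfigurations/image/v1"

    func buildSpec() *imagev1ac.ImageStreamSpecApplyConfiguration {
        spec := imagev1ac.ImageStreamSpec().
            WithTags(
                imagev1ac.TagReference().WithName("latest"),
                imagev1ac.TagReference().WithName("v1"),
            ).
            WithTags(imagev1ac.TagReference().WithName("v2")) // appended: spec.Tags now holds three entries
        // spec.WithTags(nil) would panic with "nil value passed to WithTags".
        return spec
    }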
+func (b *ImageStreamStatusApplyConfiguration) WithPublicDockerImageRepository(value string) *ImageStreamStatusApplyConfiguration { + b.PublicDockerImageRepository = &value + return b +} + +// WithTags adds the given value to the Tags field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Tags field. +func (b *ImageStreamStatusApplyConfiguration) WithTags(values ...*NamedTagEventListApplyConfiguration) *ImageStreamStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithTags") + } + b.Tags = append(b.Tags, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/namedtageventlist.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/namedtageventlist.go new file mode 100644 index 000000000..9c00746ed --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/namedtageventlist.go @@ -0,0 +1,51 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// NamedTagEventListApplyConfiguration represents an declarative configuration of the NamedTagEventList type for use +// with apply. +type NamedTagEventListApplyConfiguration struct { + Tag *string `json:"tag,omitempty"` + Items []TagEventApplyConfiguration `json:"items,omitempty"` + Conditions []TagEventConditionApplyConfiguration `json:"conditions,omitempty"` +} + +// NamedTagEventListApplyConfiguration constructs an declarative configuration of the NamedTagEventList type for use with +// apply. +func NamedTagEventList() *NamedTagEventListApplyConfiguration { + return &NamedTagEventListApplyConfiguration{} +} + +// WithTag sets the Tag field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Tag field is set to the value of the last call. +func (b *NamedTagEventListApplyConfiguration) WithTag(value string) *NamedTagEventListApplyConfiguration { + b.Tag = &value + return b +} + +// WithItems adds the given value to the Items field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Items field. +func (b *NamedTagEventListApplyConfiguration) WithItems(values ...*TagEventApplyConfiguration) *NamedTagEventListApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithItems") + } + b.Items = append(b.Items, *values[i]) + } + return b +} + +// WithConditions adds the given value to the Conditions field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Conditions field. 
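ImageStream status is ordinarily written by the registry/controller side, so these status builders are most useful together with ExtractImageStreamStatus and a status-subresource apply. A sketch, assuming the generated typed client also exposes an ApplyStatus method (not shown in this hunk) and the illustrative names used earlier:

    is, err := cs.ImageV1().ImageStreams("demo").Get(ctx, "sample", metav1.GetOptions{})
    if err != nil {
        return err
    }
    ac, err := imagev1ac.ExtractImageStreamStatus(is, "my-controller")
    if err != nil {
        return err
    }
    if ac.Status == nil {
        ac.WithStatus(imagev1ac.ImageStreamStatus())
    }
    ac.Status.WithDockerImageRepository("image-registry.openshift-image-registry.svc:5000/demo/sample")
    _, err = cs.ImageV1().ImageStreams("demo").ApplyStatus(ctx, ac,
        metav1.ApplyOptions{FieldManager: "my-controller", Force: true})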
+func (b *NamedTagEventListApplyConfiguration) WithConditions(values ...*TagEventConditionApplyConfiguration) *NamedTagEventListApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithConditions") + } + b.Conditions = append(b.Conditions, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturecondition.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturecondition.go new file mode 100644 index 000000000..5b98b799b --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturecondition.go @@ -0,0 +1,74 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/image/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// SignatureConditionApplyConfiguration represents an declarative configuration of the SignatureCondition type for use +// with apply. +type SignatureConditionApplyConfiguration struct { + Type *v1.SignatureConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + LastProbeTime *metav1.Time `json:"lastProbeTime,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` +} + +// SignatureConditionApplyConfiguration constructs an declarative configuration of the SignatureCondition type for use with +// apply. +func SignatureCondition() *SignatureConditionApplyConfiguration { + return &SignatureConditionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *SignatureConditionApplyConfiguration) WithType(value v1.SignatureConditionType) *SignatureConditionApplyConfiguration { + b.Type = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *SignatureConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *SignatureConditionApplyConfiguration { + b.Status = &value + return b +} + +// WithLastProbeTime sets the LastProbeTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastProbeTime field is set to the value of the last call. +func (b *SignatureConditionApplyConfiguration) WithLastProbeTime(value metav1.Time) *SignatureConditionApplyConfiguration { + b.LastProbeTime = &value + return b +} + +// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastTransitionTime field is set to the value of the last call. 
+func (b *SignatureConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *SignatureConditionApplyConfiguration { + b.LastTransitionTime = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *SignatureConditionApplyConfiguration) WithReason(value string) *SignatureConditionApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *SignatureConditionApplyConfiguration) WithMessage(value string) *SignatureConditionApplyConfiguration { + b.Message = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturegenericentity.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturegenericentity.go new file mode 100644 index 000000000..ab0b0faaa --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturegenericentity.go @@ -0,0 +1,32 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// SignatureGenericEntityApplyConfiguration represents an declarative configuration of the SignatureGenericEntity type for use +// with apply. +type SignatureGenericEntityApplyConfiguration struct { + Organization *string `json:"organization,omitempty"` + CommonName *string `json:"commonName,omitempty"` +} + +// SignatureGenericEntityApplyConfiguration constructs an declarative configuration of the SignatureGenericEntity type for use with +// apply. +func SignatureGenericEntity() *SignatureGenericEntityApplyConfiguration { + return &SignatureGenericEntityApplyConfiguration{} +} + +// WithOrganization sets the Organization field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Organization field is set to the value of the last call. +func (b *SignatureGenericEntityApplyConfiguration) WithOrganization(value string) *SignatureGenericEntityApplyConfiguration { + b.Organization = &value + return b +} + +// WithCommonName sets the CommonName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CommonName field is set to the value of the last call. +func (b *SignatureGenericEntityApplyConfiguration) WithCommonName(value string) *SignatureGenericEntityApplyConfiguration { + b.CommonName = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signatureissuer.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signatureissuer.go new file mode 100644 index 000000000..7b7f7fdcc --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signatureissuer.go @@ -0,0 +1,31 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
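The condition builders compose the same way as the object builders. A sketch of assembling a SignatureCondition, with the condition type written as a string conversion since the named constants live in github.com/openshift/api/image/v1 (imported as apiimagev1 here; values illustrative):

    import (
        apiimagev1 "github.com/openshift/api/image/v1"
        imagev1ac "github.com/openshift/client-go/image/applyconfigurations/image/v1"
        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func trustedCondition() *imagev1ac.SignatureConditionApplyConfiguration {
        return imagev1ac.SignatureCondition().
            WithType(apiimagev1.SignatureConditionType("Trusted")).
            WithStatus(corev1.ConditionTrue).
            WithLastProbeTime(metav1.Now()).
            WithReason("ManuallyVerified")
    }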
+ +package v1 + +// SignatureIssuerApplyConfiguration represents an declarative configuration of the SignatureIssuer type for use +// with apply. +type SignatureIssuerApplyConfiguration struct { + SignatureGenericEntityApplyConfiguration `json:",inline"` +} + +// SignatureIssuerApplyConfiguration constructs an declarative configuration of the SignatureIssuer type for use with +// apply. +func SignatureIssuer() *SignatureIssuerApplyConfiguration { + return &SignatureIssuerApplyConfiguration{} +} + +// WithOrganization sets the Organization field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Organization field is set to the value of the last call. +func (b *SignatureIssuerApplyConfiguration) WithOrganization(value string) *SignatureIssuerApplyConfiguration { + b.Organization = &value + return b +} + +// WithCommonName sets the CommonName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CommonName field is set to the value of the last call. +func (b *SignatureIssuerApplyConfiguration) WithCommonName(value string) *SignatureIssuerApplyConfiguration { + b.CommonName = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturesubject.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturesubject.go new file mode 100644 index 000000000..9ce151975 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/signaturesubject.go @@ -0,0 +1,40 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// SignatureSubjectApplyConfiguration represents an declarative configuration of the SignatureSubject type for use +// with apply. +type SignatureSubjectApplyConfiguration struct { + SignatureGenericEntityApplyConfiguration `json:",inline"` + PublicKeyID *string `json:"publicKeyID,omitempty"` +} + +// SignatureSubjectApplyConfiguration constructs an declarative configuration of the SignatureSubject type for use with +// apply. +func SignatureSubject() *SignatureSubjectApplyConfiguration { + return &SignatureSubjectApplyConfiguration{} +} + +// WithOrganization sets the Organization field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Organization field is set to the value of the last call. +func (b *SignatureSubjectApplyConfiguration) WithOrganization(value string) *SignatureSubjectApplyConfiguration { + b.Organization = &value + return b +} + +// WithCommonName sets the CommonName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CommonName field is set to the value of the last call. +func (b *SignatureSubjectApplyConfiguration) WithCommonName(value string) *SignatureSubjectApplyConfiguration { + b.CommonName = &value + return b +} + +// WithPublicKeyID sets the PublicKeyID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the PublicKeyID field is set to the value of the last call. +func (b *SignatureSubjectApplyConfiguration) WithPublicKeyID(value string) *SignatureSubjectApplyConfiguration { + b.PublicKeyID = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagevent.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagevent.go new file mode 100644 index 000000000..cc7722e2d --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagevent.go @@ -0,0 +1,54 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// TagEventApplyConfiguration represents an declarative configuration of the TagEvent type for use +// with apply. +type TagEventApplyConfiguration struct { + Created *v1.Time `json:"created,omitempty"` + DockerImageReference *string `json:"dockerImageReference,omitempty"` + Image *string `json:"image,omitempty"` + Generation *int64 `json:"generation,omitempty"` +} + +// TagEventApplyConfiguration constructs an declarative configuration of the TagEvent type for use with +// apply. +func TagEvent() *TagEventApplyConfiguration { + return &TagEventApplyConfiguration{} +} + +// WithCreated sets the Created field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Created field is set to the value of the last call. +func (b *TagEventApplyConfiguration) WithCreated(value v1.Time) *TagEventApplyConfiguration { + b.Created = &value + return b +} + +// WithDockerImageReference sets the DockerImageReference field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DockerImageReference field is set to the value of the last call. +func (b *TagEventApplyConfiguration) WithDockerImageReference(value string) *TagEventApplyConfiguration { + b.DockerImageReference = &value + return b +} + +// WithImage sets the Image field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Image field is set to the value of the last call. +func (b *TagEventApplyConfiguration) WithImage(value string) *TagEventApplyConfiguration { + b.Image = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *TagEventApplyConfiguration) WithGeneration(value int64) *TagEventApplyConfiguration { + b.Generation = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tageventcondition.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tageventcondition.go new file mode 100644 index 000000000..107560f1f --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tageventcondition.go @@ -0,0 +1,74 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +import ( + v1 "github.com/openshift/api/image/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// TagEventConditionApplyConfiguration represents an declarative configuration of the TagEventCondition type for use +// with apply. +type TagEventConditionApplyConfiguration struct { + Type *v1.TagEventConditionType `json:"type,omitempty"` + Status *corev1.ConditionStatus `json:"status,omitempty"` + LastTransitionTime *metav1.Time `json:"lastTransitionTime,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` + Generation *int64 `json:"generation,omitempty"` +} + +// TagEventConditionApplyConfiguration constructs an declarative configuration of the TagEventCondition type for use with +// apply. +func TagEventCondition() *TagEventConditionApplyConfiguration { + return &TagEventConditionApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *TagEventConditionApplyConfiguration) WithType(value v1.TagEventConditionType) *TagEventConditionApplyConfiguration { + b.Type = &value + return b +} + +// WithStatus sets the Status field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Status field is set to the value of the last call. +func (b *TagEventConditionApplyConfiguration) WithStatus(value corev1.ConditionStatus) *TagEventConditionApplyConfiguration { + b.Status = &value + return b +} + +// WithLastTransitionTime sets the LastTransitionTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the LastTransitionTime field is set to the value of the last call. +func (b *TagEventConditionApplyConfiguration) WithLastTransitionTime(value metav1.Time) *TagEventConditionApplyConfiguration { + b.LastTransitionTime = &value + return b +} + +// WithReason sets the Reason field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reason field is set to the value of the last call. +func (b *TagEventConditionApplyConfiguration) WithReason(value string) *TagEventConditionApplyConfiguration { + b.Reason = &value + return b +} + +// WithMessage sets the Message field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Message field is set to the value of the last call. +func (b *TagEventConditionApplyConfiguration) WithMessage(value string) *TagEventConditionApplyConfiguration { + b.Message = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *TagEventConditionApplyConfiguration) WithGeneration(value int64) *TagEventConditionApplyConfiguration { + b.Generation = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagimportpolicy.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagimportpolicy.go new file mode 100644 index 000000000..bb1f7e4d6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagimportpolicy.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/image/v1" +) + +// TagImportPolicyApplyConfiguration represents an declarative configuration of the TagImportPolicy type for use +// with apply. +type TagImportPolicyApplyConfiguration struct { + Insecure *bool `json:"insecure,omitempty"` + Scheduled *bool `json:"scheduled,omitempty"` + ImportMode *v1.ImportModeType `json:"importMode,omitempty"` +} + +// TagImportPolicyApplyConfiguration constructs an declarative configuration of the TagImportPolicy type for use with +// apply. +func TagImportPolicy() *TagImportPolicyApplyConfiguration { + return &TagImportPolicyApplyConfiguration{} +} + +// WithInsecure sets the Insecure field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Insecure field is set to the value of the last call. +func (b *TagImportPolicyApplyConfiguration) WithInsecure(value bool) *TagImportPolicyApplyConfiguration { + b.Insecure = &value + return b +} + +// WithScheduled sets the Scheduled field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Scheduled field is set to the value of the last call. +func (b *TagImportPolicyApplyConfiguration) WithScheduled(value bool) *TagImportPolicyApplyConfiguration { + b.Scheduled = &value + return b +} + +// WithImportMode sets the ImportMode field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ImportMode field is set to the value of the last call. +func (b *TagImportPolicyApplyConfiguration) WithImportMode(value v1.ImportModeType) *TagImportPolicyApplyConfiguration { + b.ImportMode = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagreference.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagreference.go new file mode 100644 index 000000000..77c6e1163 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagreference.go @@ -0,0 +1,87 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" +) + +// TagReferenceApplyConfiguration represents an declarative configuration of the TagReference type for use +// with apply. 
+type TagReferenceApplyConfiguration struct { + Name *string `json:"name,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` + From *v1.ObjectReference `json:"from,omitempty"` + Reference *bool `json:"reference,omitempty"` + Generation *int64 `json:"generation,omitempty"` + ImportPolicy *TagImportPolicyApplyConfiguration `json:"importPolicy,omitempty"` + ReferencePolicy *TagReferencePolicyApplyConfiguration `json:"referencePolicy,omitempty"` +} + +// TagReferenceApplyConfiguration constructs an declarative configuration of the TagReference type for use with +// apply. +func TagReference() *TagReferenceApplyConfiguration { + return &TagReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *TagReferenceApplyConfiguration) WithName(value string) *TagReferenceApplyConfiguration { + b.Name = &value + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *TagReferenceApplyConfiguration) WithAnnotations(entries map[string]string) *TagReferenceApplyConfiguration { + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithFrom sets the From field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the From field is set to the value of the last call. +func (b *TagReferenceApplyConfiguration) WithFrom(value v1.ObjectReference) *TagReferenceApplyConfiguration { + b.From = &value + return b +} + +// WithReference sets the Reference field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Reference field is set to the value of the last call. +func (b *TagReferenceApplyConfiguration) WithReference(value bool) *TagReferenceApplyConfiguration { + b.Reference = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. +func (b *TagReferenceApplyConfiguration) WithGeneration(value int64) *TagReferenceApplyConfiguration { + b.Generation = &value + return b +} + +// WithImportPolicy sets the ImportPolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ImportPolicy field is set to the value of the last call. 
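Note that WithFrom takes a plain corev1.ObjectReference rather than an apply configuration. A sketch of a tag that tracks an external image and re-imports it on a schedule; the registry URL is illustrative, and the LocalTagReferencePolicy constant is assumed from github.com/openshift/api/image/v1:

    import (
        apiimagev1 "github.com/openshift/api/image/v1"
        imagev1ac "github.com/openshift/client-go/image/applyconfigurations/image/v1"
        corev1 "k8s.io/api/core/v1"
    )

    func ubiTag() *imagev1ac.TagReferenceApplyConfiguration {
        return imagev1ac.TagReference().
            WithName("8.9").
            WithFrom(corev1.ObjectReference{
                Kind: "DockerImage",
                Name: "registry.access.redhat.com/ubi8/ubi:8.9",
            }).
            WithImportPolicy(imagev1ac.TagImportPolicy().WithScheduled(true)).
            WithReferencePolicy(imagev1ac.TagReferencePolicy().WithType(apiimagev1.LocalTagReferencePolicy))
    }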
+func (b *TagReferenceApplyConfiguration) WithImportPolicy(value *TagImportPolicyApplyConfiguration) *TagReferenceApplyConfiguration { + b.ImportPolicy = value + return b +} + +// WithReferencePolicy sets the ReferencePolicy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ReferencePolicy field is set to the value of the last call. +func (b *TagReferenceApplyConfiguration) WithReferencePolicy(value *TagReferencePolicyApplyConfiguration) *TagReferenceApplyConfiguration { + b.ReferencePolicy = value + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagreferencepolicy.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagreferencepolicy.go new file mode 100644 index 000000000..4476d0099 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/image/v1/tagreferencepolicy.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/image/v1" +) + +// TagReferencePolicyApplyConfiguration represents an declarative configuration of the TagReferencePolicy type for use +// with apply. +type TagReferencePolicyApplyConfiguration struct { + Type *v1.TagReferencePolicyType `json:"type,omitempty"` +} + +// TagReferencePolicyApplyConfiguration constructs an declarative configuration of the TagReferencePolicy type for use with +// apply. +func TagReferencePolicy() *TagReferencePolicyApplyConfiguration { + return &TagReferencePolicyApplyConfiguration{} +} + +// WithType sets the Type field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Type field is set to the value of the last call. +func (b *TagReferencePolicyApplyConfiguration) WithType(value v1.TagReferencePolicyType) *TagReferencePolicyApplyConfiguration { + b.Type = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/image/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/image/applyconfigurations/internal/internal.go new file mode 100644 index 000000000..c3ab732d7 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/applyconfigurations/internal/internal.go @@ -0,0 +1,598 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package internal + +import ( + "fmt" + "sync" + + typed "sigs.k8s.io/structured-merge-diff/v4/typed" +) + +func Parser() *typed.Parser { + parserOnce.Do(func() { + var err error + parser, err = typed.NewParser(schemaYAML) + if err != nil { + panic(fmt.Sprintf("Failed to parse schema: %v", err)) + } + }) + return parser +} + +var parserOnce sync.Once +var parser *typed.Parser +var schemaYAML = typed.YAMLObject(`types: +- name: com.github.openshift.api.image.v1.Image + map: + fields: + - name: apiVersion + type: + scalar: string + - name: dockerImageConfig + type: + scalar: string + - name: dockerImageLayers + type: + list: + elementType: + namedType: com.github.openshift.api.image.v1.ImageLayer + elementRelationship: atomic + - name: dockerImageManifest + type: + scalar: string + - name: dockerImageManifestMediaType + type: + scalar: string + - name: dockerImageManifests + type: + list: + elementType: + namedType: com.github.openshift.api.image.v1.ImageManifest + elementRelationship: atomic + - name: dockerImageMetadata + type: + namedType: __untyped_atomic_ + default: {} + - name: dockerImageMetadataVersion + type: + scalar: string + - name: dockerImageReference + type: + scalar: string + - name: dockerImageSignatures + type: + list: + elementType: + scalar: string + elementRelationship: atomic + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: signatures + type: + list: + elementType: + namedType: com.github.openshift.api.image.v1.ImageSignature + elementRelationship: associative + keys: + - name +- name: com.github.openshift.api.image.v1.ImageLayer + map: + fields: + - name: mediaType + type: + scalar: string + default: "" + - name: name + type: + scalar: string + default: "" + - name: size + type: + scalar: numeric + default: 0 +- name: com.github.openshift.api.image.v1.ImageLookupPolicy + map: + fields: + - name: local + type: + scalar: boolean + default: false +- name: com.github.openshift.api.image.v1.ImageManifest + map: + fields: + - name: architecture + type: + scalar: string + default: "" + - name: digest + type: + scalar: string + default: "" + - name: manifestSize + type: + scalar: numeric + default: 0 + - name: mediaType + type: + scalar: string + default: "" + - name: os + type: + scalar: string + default: "" + - name: variant + type: + scalar: string +- name: com.github.openshift.api.image.v1.ImageSignature + map: + fields: + - name: apiVersion + type: + scalar: string + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.image.v1.SignatureCondition + elementRelationship: associative + keys: + - type + - name: content + type: + scalar: string + - name: created + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: imageIdentity + type: + scalar: string + - name: issuedBy + type: + namedType: com.github.openshift.api.image.v1.SignatureIssuer + - name: issuedTo + type: + namedType: com.github.openshift.api.image.v1.SignatureSubject + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: signedClaims + type: + map: + elementType: + scalar: string + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.image.v1.ImageStream + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: 
io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: com.github.openshift.api.image.v1.ImageStreamSpec + default: {} + - name: status + type: + namedType: com.github.openshift.api.image.v1.ImageStreamStatus + default: {} +- name: com.github.openshift.api.image.v1.ImageStreamMapping + map: + fields: + - name: apiVersion + type: + scalar: string + - name: image + type: + namedType: com.github.openshift.api.image.v1.Image + default: {} + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: tag + type: + scalar: string + default: "" +- name: com.github.openshift.api.image.v1.ImageStreamSpec + map: + fields: + - name: dockerImageRepository + type: + scalar: string + - name: lookupPolicy + type: + namedType: com.github.openshift.api.image.v1.ImageLookupPolicy + default: {} + - name: tags + type: + list: + elementType: + namedType: com.github.openshift.api.image.v1.TagReference + elementRelationship: associative + keys: + - name +- name: com.github.openshift.api.image.v1.ImageStreamStatus + map: + fields: + - name: dockerImageRepository + type: + scalar: string + default: "" + - name: publicDockerImageRepository + type: + scalar: string + - name: tags + type: + list: + elementType: + namedType: com.github.openshift.api.image.v1.NamedTagEventList + elementRelationship: associative + keys: + - tag +- name: com.github.openshift.api.image.v1.NamedTagEventList + map: + fields: + - name: conditions + type: + list: + elementType: + namedType: com.github.openshift.api.image.v1.TagEventCondition + elementRelationship: atomic + - name: items + type: + list: + elementType: + namedType: com.github.openshift.api.image.v1.TagEvent + elementRelationship: atomic + - name: tag + type: + scalar: string + default: "" +- name: com.github.openshift.api.image.v1.SignatureCondition + map: + fields: + - name: lastProbeTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} + - name: lastTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} + - name: message + type: + scalar: string + - name: reason + type: + scalar: string + - name: status + type: + scalar: string + default: "" + - name: type + type: + scalar: string + default: "" +- name: com.github.openshift.api.image.v1.SignatureIssuer + map: + fields: + - name: commonName + type: + scalar: string + - name: organization + type: + scalar: string +- name: com.github.openshift.api.image.v1.SignatureSubject + map: + fields: + - name: commonName + type: + scalar: string + - name: organization + type: + scalar: string + - name: publicKeyID + type: + scalar: string + default: "" +- name: com.github.openshift.api.image.v1.TagEvent + map: + fields: + - name: created + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} + - name: dockerImageReference + type: + scalar: string + default: "" + - name: generation + type: + scalar: numeric + default: 0 + - name: image + type: + scalar: string + default: "" +- name: com.github.openshift.api.image.v1.TagEventCondition + map: + fields: + - name: generation + type: + scalar: numeric + default: 0 + - name: lastTransitionTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} + - name: message + type: + scalar: string + - name: reason + type: + scalar: string + - name: status + type: + scalar: string + default: "" + - name: type + type: + scalar: string + default: "" +- name: 
com.github.openshift.api.image.v1.TagImportPolicy + map: + fields: + - name: importMode + type: + scalar: string + - name: insecure + type: + scalar: boolean + - name: scheduled + type: + scalar: boolean +- name: com.github.openshift.api.image.v1.TagReference + map: + fields: + - name: annotations + type: + map: + elementType: + scalar: string + - name: from + type: + namedType: io.k8s.api.core.v1.ObjectReference + - name: generation + type: + scalar: numeric + - name: importPolicy + type: + namedType: com.github.openshift.api.image.v1.TagImportPolicy + default: {} + - name: name + type: + scalar: string + default: "" + - name: reference + type: + scalar: boolean + - name: referencePolicy + type: + namedType: com.github.openshift.api.image.v1.TagReferencePolicy + default: {} +- name: com.github.openshift.api.image.v1.TagReferencePolicy + map: + fields: + - name: type + type: + scalar: string + default: "" +- name: io.k8s.api.core.v1.ObjectReference + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldPath + type: + scalar: string + - name: kind + type: + scalar: string + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: resourceVersion + type: + scalar: string + - name: uid + type: + scalar: string + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + map: + fields: + - name: apiVersion + type: + scalar: string + - name: fieldsType + type: + scalar: string + - name: fieldsV1 + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1 + - name: manager + type: + scalar: string + - name: operation + type: + scalar: string + - name: subresource + type: + scalar: string + - name: time + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time +- name: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + map: + fields: + - name: annotations + type: + map: + elementType: + scalar: string + - name: creationTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} + - name: deletionGracePeriodSeconds + type: + scalar: numeric + - name: deletionTimestamp + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: finalizers + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: generateName + type: + scalar: string + - name: generation + type: + scalar: numeric + - name: labels + type: + map: + elementType: + scalar: string + - name: managedFields + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry + elementRelationship: atomic + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: ownerReferences + type: + list: + elementType: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + elementRelationship: associative + keys: + - uid + - name: resourceVersion + type: + scalar: string + - name: selfLink + type: + scalar: string + - name: uid + type: + scalar: string +- name: io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference + map: + fields: + - name: apiVersion + type: + scalar: string + default: "" + - name: blockOwnerDeletion + type: + scalar: boolean + - name: controller + type: + scalar: boolean + - name: kind + type: + scalar: string + 
default: "" + - name: name + type: + scalar: string + default: "" + - name: uid + type: + scalar: string + default: "" + elementRelationship: atomic +- name: io.k8s.apimachinery.pkg.apis.meta.v1.Time + scalar: untyped +- name: io.k8s.apimachinery.pkg.runtime.RawExtension + map: + elementType: + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +- name: __untyped_atomic_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic +- name: __untyped_deduced_ + scalar: untyped + list: + elementType: + namedType: __untyped_atomic_ + elementRelationship: atomic + map: + elementType: + namedType: __untyped_deduced_ + elementRelationship: separable +`) diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/clientset.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/clientset.go new file mode 100644 index 000000000..b0ebcebf5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/clientset.go @@ -0,0 +1,105 @@ +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + "fmt" + "net/http" + + imagev1 "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1" + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + ImageV1() imagev1.ImageV1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + imageV1 *imagev1.ImageV1Client +} + +// ImageV1 retrieves the ImageV1Client +func (c *Clientset) ImageV1() imagev1.ImageV1Interface { + return c.imageV1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. 
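As the comment above spells out, a token-bucket rate limiter is only installed when the caller sets QPS without providing a RateLimiter, and Burst must then be positive or construction fails. A minimal sketch of building the clientset down this path (the kubeconfig handling and the QPS/Burst values are illustrative, not defaults):

package main

import (
	"fmt"

	versioned "github.com/openshift/client-go/image/clientset/versioned"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// BuildConfigFromFlags falls back to in-cluster config when both
	// arguments are empty.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	// QPS set, RateLimiter nil: NewForConfig wires up the token-bucket
	// limiter below; leaving Burst at 0 here would return an error instead.
	cfg.QPS = 20
	cfg.Burst = 40
	cs, err := versioned.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Printf("image clientset ready: %T\n", cs.ImageV1())
}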
+func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.imageV1, err = imagev1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.imageV1 = imagev1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/doc.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/doc.go new file mode 100644 index 000000000..0e0c2a890 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package versioned diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/scheme/doc.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/scheme/doc.go new file mode 100644 index 000000000..14db57a58 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/scheme/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/scheme/register.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/scheme/register.go new file mode 100644 index 000000000..776540484 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/scheme/register.go @@ -0,0 +1,40 @@ +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + imagev1 "github.com/openshift/api/image/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + imagev1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. 
This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/doc.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/doc.go new file mode 100644 index 000000000..225e6b2be --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/doc.go @@ -0,0 +1,4 @@ +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/generated_expansion.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/generated_expansion.go new file mode 100644 index 000000000..c495ba76e --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/generated_expansion.go @@ -0,0 +1,19 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type ImageExpansion interface{} + +type ImageSignatureExpansion interface{} + +type ImageStreamExpansion interface{} + +type ImageStreamImageExpansion interface{} + +type ImageStreamImportExpansion interface{} + +type ImageStreamMappingExpansion interface{} + +type ImageStreamTagExpansion interface{} + +type ImageTagExpansion interface{} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/image.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/image.go new file mode 100644 index 000000000..b65e75e36 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/image.go @@ -0,0 +1,181 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1 "github.com/openshift/api/image/v1" + imagev1 "github.com/openshift/client-go/image/applyconfigurations/image/v1" + scheme "github.com/openshift/client-go/image/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ImagesGetter has a method to return a ImageInterface. +// A group's client should implement this interface. +type ImagesGetter interface { + Images() ImageInterface +} + +// ImageInterface has methods to work with Image resources. 
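Before the typed interfaces below, note that the scheme package registered above is what lets these clients serialize image.openshift.io objects at all: its init() adds the imagev1 types to a shared runtime.Scheme and codec factory. A minimal decoding sketch against that scheme (the manifest literal is illustrative):

package main

import (
	"fmt"

	imagev1 "github.com/openshift/api/image/v1"
	"github.com/openshift/client-go/image/clientset/versioned/scheme"
)

func main() {
	manifest := []byte(`apiVersion: image.openshift.io/v1
kind: Image
metadata:
  name: sha256-example
`)
	// UniversalDeserializer decodes any type registered in scheme.Scheme,
	// which init() populated via AddToScheme.
	obj, gvk, err := scheme.Codecs.UniversalDeserializer().Decode(manifest, nil, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(gvk, obj.(*imagev1.Image).Name)
}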
+type ImageInterface interface { + Create(ctx context.Context, image *v1.Image, opts metav1.CreateOptions) (*v1.Image, error) + Update(ctx context.Context, image *v1.Image, opts metav1.UpdateOptions) (*v1.Image, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Image, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ImageList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Image, err error) + Apply(ctx context.Context, image *imagev1.ImageApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Image, err error) + ImageExpansion +} + +// images implements ImageInterface +type images struct { + client rest.Interface +} + +// newImages returns a Images +func newImages(c *ImageV1Client) *images { + return &images{ + client: c.RESTClient(), + } +} + +// Get takes name of the image, and returns the corresponding image object, and an error if there is any. +func (c *images) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Image, err error) { + result = &v1.Image{} + err = c.client.Get(). + Resource("images"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Images that match those selectors. +func (c *images) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ImageList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ImageList{} + err = c.client.Get(). + Resource("images"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested images. +func (c *images) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("images"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a image and creates it. Returns the server's representation of the image, and an error, if there is any. +func (c *images) Create(ctx context.Context, image *v1.Image, opts metav1.CreateOptions) (result *v1.Image, err error) { + result = &v1.Image{} + err = c.client.Post(). + Resource("images"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(image). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a image and updates it. Returns the server's representation of the image, and an error, if there is any. +func (c *images) Update(ctx context.Context, image *v1.Image, opts metav1.UpdateOptions) (result *v1.Image, err error) { + result = &v1.Image{} + err = c.client.Put(). + Resource("images"). + Name(image.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(image). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the image and deletes it. Returns an error if one occurs. 
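The Get, List, and Watch methods above all run metav1 options through the ParameterCodec, so field and label selectors work as on core resources, and TimeoutSeconds is translated into a client-side request timeout. A short sketch of a filtered, bounded list (the selector and timeout are illustrative):

package imagesutil

import (
	"context"
	"fmt"

	versioned "github.com/openshift/client-go/image/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func listManagedImages(ctx context.Context, cs *versioned.Clientset) error {
	timeout := int64(30) // becomes the request Timeout in List above
	images, err := cs.ImageV1().Images().List(ctx, metav1.ListOptions{
		LabelSelector:  "openshift.io/managed=true", // hypothetical label
		TimeoutSeconds: &timeout,
	})
	if err != nil {
		return err
	}
	for _, img := range images.Items {
		fmt.Println(img.Name)
	}
	return nil
}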
+func (c *images) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("images"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *images) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("images"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched image. +func (c *images) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Image, err error) { + result = &v1.Image{} + err = c.client.Patch(pt). + Resource("images"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied image. +func (c *images) Apply(ctx context.Context, image *imagev1.ImageApplyConfiguration, opts metav1.ApplyOptions) (result *v1.Image, err error) { + if image == nil { + return nil, fmt.Errorf("image provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(image) + if err != nil { + return nil, err + } + name := image.Name + if name == nil { + return nil, fmt.Errorf("image.Name must be provided to Apply") + } + result = &v1.Image{} + err = c.client.Patch(types.ApplyPatchType). + Resource("images"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/image_client.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/image_client.go new file mode 100644 index 000000000..dfd147108 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/image_client.go @@ -0,0 +1,126 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "net/http" + + v1 "github.com/openshift/api/image/v1" + "github.com/openshift/client-go/image/clientset/versioned/scheme" + rest "k8s.io/client-go/rest" +) + +type ImageV1Interface interface { + RESTClient() rest.Interface + ImagesGetter + ImageSignaturesGetter + ImageStreamsGetter + ImageStreamImagesGetter + ImageStreamImportsGetter + ImageStreamMappingsGetter + ImageStreamTagsGetter + ImageTagsGetter +} + +// ImageV1Client is used to interact with features provided by the image.openshift.io group. 
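Patch and Apply above are two faces of the same call: Apply marshals the apply configuration to JSON and submits it as an ApplyPatchType patch, which is why a non-nil name is enforced and, server-side, a field manager is mandatory. A sketch, assuming the generated applyconfigurations package exposes the conventional Image(name) constructor and a WithDockerImageReference setter (both follow applyconfiguration-gen conventions but are not shown in this diff):

package imagesutil

import (
	"context"

	imagev1apply "github.com/openshift/client-go/image/applyconfigurations/image/v1"
	versioned "github.com/openshift/client-go/image/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func applyImage(ctx context.Context, cs *versioned.Clientset) error {
	// Assumed constructor: sets name, kind, and apiVersion on the
	// apply configuration for the cluster-scoped Image type.
	cfg := imagev1apply.Image("sha256-example").
		WithDockerImageReference("quay.io/example/app:latest") // illustrative
	_, err := cs.ImageV1().Images().Apply(ctx, cfg, metav1.ApplyOptions{
		FieldManager: "example-controller", // required for server-side apply
		Force:        true,                 // take ownership of conflicting fields
	})
	return err
}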
+type ImageV1Client struct { + restClient rest.Interface +} + +func (c *ImageV1Client) Images() ImageInterface { + return newImages(c) +} + +func (c *ImageV1Client) ImageSignatures() ImageSignatureInterface { + return newImageSignatures(c) +} + +func (c *ImageV1Client) ImageStreams(namespace string) ImageStreamInterface { + return newImageStreams(c, namespace) +} + +func (c *ImageV1Client) ImageStreamImages(namespace string) ImageStreamImageInterface { + return newImageStreamImages(c, namespace) +} + +func (c *ImageV1Client) ImageStreamImports(namespace string) ImageStreamImportInterface { + return newImageStreamImports(c, namespace) +} + +func (c *ImageV1Client) ImageStreamMappings(namespace string) ImageStreamMappingInterface { + return newImageStreamMappings(c, namespace) +} + +func (c *ImageV1Client) ImageStreamTags(namespace string) ImageStreamTagInterface { + return newImageStreamTags(c, namespace) +} + +func (c *ImageV1Client) ImageTags(namespace string) ImageTagInterface { + return newImageTags(c, namespace) +} + +// NewForConfig creates a new ImageV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*ImageV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new ImageV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ImageV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &ImageV1Client{client}, nil +} + +// NewForConfigOrDie creates a new ImageV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ImageV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ImageV1Client for the given RESTClient. +func New(c rest.Interface) *ImageV1Client { + return &ImageV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *ImageV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagesignature.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagesignature.go new file mode 100644 index 000000000..195b8f371 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagesignature.go @@ -0,0 +1,59 @@ +// Code generated by client-gen. DO NOT EDIT. 
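setConfigDefaults above pins the image.openshift.io/v1 group version, the /apis path, and the negotiated serializer onto a copy of the config, so the typed client can also be built on its own when the aggregate Clientset is unnecessary. A minimal sketch:

package imagesutil

import (
	imagev1client "github.com/openshift/client-go/image/clientset/versioned/typed/image/v1"
	"k8s.io/client-go/rest"
)

func newImageV1(cfg *rest.Config) (*imagev1client.ImageV1Client, error) {
	// NewForConfig copies cfg, applies setConfigDefaults, and builds the
	// REST client; the caller's config is left untouched.
	return imagev1client.NewForConfig(cfg)
}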
+ +package v1 + +import ( + "context" + + v1 "github.com/openshift/api/image/v1" + scheme "github.com/openshift/client-go/image/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + rest "k8s.io/client-go/rest" +) + +// ImageSignaturesGetter has a method to return a ImageSignatureInterface. +// A group's client should implement this interface. +type ImageSignaturesGetter interface { + ImageSignatures() ImageSignatureInterface +} + +// ImageSignatureInterface has methods to work with ImageSignature resources. +type ImageSignatureInterface interface { + Create(ctx context.Context, imageSignature *v1.ImageSignature, opts metav1.CreateOptions) (*v1.ImageSignature, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + ImageSignatureExpansion +} + +// imageSignatures implements ImageSignatureInterface +type imageSignatures struct { + client rest.Interface +} + +// newImageSignatures returns a ImageSignatures +func newImageSignatures(c *ImageV1Client) *imageSignatures { + return &imageSignatures{ + client: c.RESTClient(), + } +} + +// Create takes the representation of a imageSignature and creates it. Returns the server's representation of the imageSignature, and an error, if there is any. +func (c *imageSignatures) Create(ctx context.Context, imageSignature *v1.ImageSignature, opts metav1.CreateOptions) (result *v1.ImageSignature, err error) { + result = &v1.ImageSignature{} + err = c.client.Post(). + Resource("imagesignatures"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(imageSignature). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the imageSignature and deletes it. Returns an error if one occurs. +func (c *imageSignatures) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("imagesignatures"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestream.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestream.go new file mode 100644 index 000000000..7c00e2fc6 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestream.go @@ -0,0 +1,271 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1 "github.com/openshift/api/image/v1" + imagev1 "github.com/openshift/client-go/image/applyconfigurations/image/v1" + scheme "github.com/openshift/client-go/image/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" +) + +// ImageStreamsGetter has a method to return a ImageStreamInterface. +// A group's client should implement this interface. +type ImageStreamsGetter interface { + ImageStreams(namespace string) ImageStreamInterface +} + +// ImageStreamInterface has methods to work with ImageStream resources. 
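ImageSignature, defined just above, is effectively a write-only virtual resource: the generated interface offers only Create and Delete, and the server attaches the payload to the image named in the object. OpenShift conventionally encodes the target as "<image-name>@<signature-name>" in the object name; treat that convention, and the signature type string, as assumptions here rather than facts established by this diff. A sketch:

package imagesutil

import (
	"context"

	imagev1 "github.com/openshift/api/image/v1"
	versioned "github.com/openshift/client-go/image/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func addSignature(ctx context.Context, cs *versioned.Clientset, image string, payload []byte) error {
	sig := &imagev1.ImageSignature{
		// Assumed naming convention: "<image-name>@<signature-name>".
		ObjectMeta: metav1.ObjectMeta{Name: image + "@example-signature"},
		Type:       "AtomicImageV1", // assumed signature type
		Content:    payload,
	}
	_, err := cs.ImageV1().ImageSignatures().Create(ctx, sig, metav1.CreateOptions{})
	return err
}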
+type ImageStreamInterface interface { + Create(ctx context.Context, imageStream *v1.ImageStream, opts metav1.CreateOptions) (*v1.ImageStream, error) + Update(ctx context.Context, imageStream *v1.ImageStream, opts metav1.UpdateOptions) (*v1.ImageStream, error) + UpdateStatus(ctx context.Context, imageStream *v1.ImageStream, opts metav1.UpdateOptions) (*v1.ImageStream, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ImageStream, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ImageStreamList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ImageStream, err error) + Apply(ctx context.Context, imageStream *imagev1.ImageStreamApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageStream, err error) + ApplyStatus(ctx context.Context, imageStream *imagev1.ImageStreamApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageStream, err error) + Secrets(ctx context.Context, imageStreamName string, options metav1.GetOptions) (*v1.SecretList, error) + Layers(ctx context.Context, imageStreamName string, options metav1.GetOptions) (*v1.ImageStreamLayers, error) + + ImageStreamExpansion +} + +// imageStreams implements ImageStreamInterface +type imageStreams struct { + client rest.Interface + ns string +} + +// newImageStreams returns a ImageStreams +func newImageStreams(c *ImageV1Client, namespace string) *imageStreams { + return &imageStreams{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the imageStream, and returns the corresponding imageStream object, and an error if there is any. +func (c *imageStreams) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ImageStream, err error) { + result = &v1.ImageStream{} + err = c.client.Get(). + Namespace(c.ns). + Resource("imagestreams"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ImageStreams that match those selectors. +func (c *imageStreams) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ImageStreamList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ImageStreamList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("imagestreams"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested imageStreams. +func (c *imageStreams) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("imagestreams"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a imageStream and creates it. Returns the server's representation of the imageStream, and an error, if there is any. 
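Watch above flips opts.Watch and hands back a watch.Interface; the idiomatic consumer ranges over ResultChan and stops the watch when finished so the underlying connection is released. A short sketch (the event handling is illustrative):

package imagesutil

import (
	"context"
	"fmt"

	imagev1 "github.com/openshift/api/image/v1"
	versioned "github.com/openshift/client-go/image/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func watchImageStreams(ctx context.Context, cs *versioned.Clientset, ns string) error {
	w, err := cs.ImageV1().ImageStreams(ns).Watch(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop() // always release the watch

	for ev := range w.ResultChan() {
		if is, ok := ev.Object.(*imagev1.ImageStream); ok {
			fmt.Println(ev.Type, is.Namespace, is.Name)
		}
	}
	return nil
}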
+func (c *imageStreams) Create(ctx context.Context, imageStream *v1.ImageStream, opts metav1.CreateOptions) (result *v1.ImageStream, err error) { + result = &v1.ImageStream{} + err = c.client.Post(). + Namespace(c.ns). + Resource("imagestreams"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(imageStream). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a imageStream and updates it. Returns the server's representation of the imageStream, and an error, if there is any. +func (c *imageStreams) Update(ctx context.Context, imageStream *v1.ImageStream, opts metav1.UpdateOptions) (result *v1.ImageStream, err error) { + result = &v1.ImageStream{} + err = c.client.Put(). + Namespace(c.ns). + Resource("imagestreams"). + Name(imageStream.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(imageStream). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *imageStreams) UpdateStatus(ctx context.Context, imageStream *v1.ImageStream, opts metav1.UpdateOptions) (result *v1.ImageStream, err error) { + result = &v1.ImageStream{} + err = c.client.Put(). + Namespace(c.ns). + Resource("imagestreams"). + Name(imageStream.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(imageStream). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the imageStream and deletes it. Returns an error if one occurs. +func (c *imageStreams) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("imagestreams"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *imageStreams) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("imagestreams"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched imageStream. +func (c *imageStreams) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ImageStream, err error) { + result = &v1.ImageStream{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("imagestreams"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied imageStream. +func (c *imageStreams) Apply(ctx context.Context, imageStream *imagev1.ImageStreamApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageStream, err error) { + if imageStream == nil { + return nil, fmt.Errorf("imageStream provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(imageStream) + if err != nil { + return nil, err + } + name := imageStream.Name + if name == nil { + return nil, fmt.Errorf("imageStream.Name must be provided to Apply") + } + result = &v1.ImageStream{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("imagestreams"). + Name(*name). 
+ VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// ApplyStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus(). +func (c *imageStreams) ApplyStatus(ctx context.Context, imageStream *imagev1.ImageStreamApplyConfiguration, opts metav1.ApplyOptions) (result *v1.ImageStream, err error) { + if imageStream == nil { + return nil, fmt.Errorf("imageStream provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(imageStream) + if err != nil { + return nil, err + } + + name := imageStream.Name + if name == nil { + return nil, fmt.Errorf("imageStream.Name must be provided to Apply") + } + + result = &v1.ImageStream{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("imagestreams"). + Name(*name). + SubResource("status"). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Secrets takes name of the imageStream, and returns the corresponding v1.SecretList object, and an error if there is any. +func (c *imageStreams) Secrets(ctx context.Context, imageStreamName string, options metav1.GetOptions) (result *v1.SecretList, err error) { + result = &v1.SecretList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("imagestreams"). + Name(imageStreamName). + SubResource("secrets"). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// Layers takes name of the imageStream, and returns the corresponding v1.ImageStreamLayers object, and an error if there is any. +func (c *imageStreams) Layers(ctx context.Context, imageStreamName string, options metav1.GetOptions) (result *v1.ImageStreamLayers, err error) { + result = &v1.ImageStreamLayers{} + err = c.client.Get(). + Namespace(c.ns). + Resource("imagestreams"). + Name(imageStreamName). + SubResource("layers"). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamimage.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamimage.go new file mode 100644 index 000000000..79f46753a --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamimage.go @@ -0,0 +1,51 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + + imagev1 "github.com/openshift/api/image/v1" + scheme "github.com/openshift/client-go/image/clientset/versioned/scheme" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + rest "k8s.io/client-go/rest" +) + +// ImageStreamImagesGetter has a method to return a ImageStreamImageInterface. +// A group's client should implement this interface. +type ImageStreamImagesGetter interface { + ImageStreamImages(namespace string) ImageStreamImageInterface +} + +// ImageStreamImageInterface has methods to work with ImageStreamImage resources. 
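Secrets and Layers above are read-only subresources of an image stream ("imagestreams/<name>/secrets" and "imagestreams/<name>/layers") rather than standalone resources, which is why they take the stream name plus GetOptions. A sketch of the layers call (field access assumes the Blobs map on v1.ImageStreamLayers):

package imagesutil

import (
	"context"
	"fmt"

	versioned "github.com/openshift/client-go/image/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func printBlobCount(ctx context.Context, cs *versioned.Clientset, ns, stream string) error {
	layers, err := cs.ImageV1().ImageStreams(ns).Layers(ctx, stream, metav1.GetOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("%s/%s references %d blobs\n", ns, stream, len(layers.Blobs))
	return nil
}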
+type ImageStreamImageInterface interface { + Get(ctx context.Context, name string, opts v1.GetOptions) (*imagev1.ImageStreamImage, error) + ImageStreamImageExpansion +} + +// imageStreamImages implements ImageStreamImageInterface +type imageStreamImages struct { + client rest.Interface + ns string +} + +// newImageStreamImages returns a ImageStreamImages +func newImageStreamImages(c *ImageV1Client, namespace string) *imageStreamImages { + return &imageStreamImages{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the imageStreamImage, and returns the corresponding imageStreamImage object, and an error if there is any. +func (c *imageStreamImages) Get(ctx context.Context, name string, options v1.GetOptions) (result *imagev1.ImageStreamImage, err error) { + result = &imagev1.ImageStreamImage{} + err = c.client.Get(). + Namespace(c.ns). + Resource("imagestreamimages"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamimport.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamimport.go new file mode 100644 index 000000000..7c43c951d --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamimport.go @@ -0,0 +1,51 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + + v1 "github.com/openshift/api/image/v1" + scheme "github.com/openshift/client-go/image/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + rest "k8s.io/client-go/rest" +) + +// ImageStreamImportsGetter has a method to return a ImageStreamImportInterface. +// A group's client should implement this interface. +type ImageStreamImportsGetter interface { + ImageStreamImports(namespace string) ImageStreamImportInterface +} + +// ImageStreamImportInterface has methods to work with ImageStreamImport resources. +type ImageStreamImportInterface interface { + Create(ctx context.Context, imageStreamImport *v1.ImageStreamImport, opts metav1.CreateOptions) (*v1.ImageStreamImport, error) + ImageStreamImportExpansion +} + +// imageStreamImports implements ImageStreamImportInterface +type imageStreamImports struct { + client rest.Interface + ns string +} + +// newImageStreamImports returns a ImageStreamImports +func newImageStreamImports(c *ImageV1Client, namespace string) *imageStreamImports { + return &imageStreamImports{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Create takes the representation of a imageStreamImport and creates it. Returns the server's representation of the imageStreamImport, and an error, if there is any. +func (c *imageStreamImports) Create(ctx context.Context, imageStreamImport *v1.ImageStreamImport, opts metav1.CreateOptions) (result *v1.ImageStreamImport, err error) { + result = &v1.ImageStreamImport{} + err = c.client.Post(). + Namespace(c.ns). + Resource("imagestreamimports"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(imageStreamImport). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreammapping.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreammapping.go new file mode 100644 index 000000000..b19c110b7 --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreammapping.go @@ -0,0 +1,83 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + json "encoding/json" + "fmt" + + imagev1 "github.com/openshift/api/image/v1" + v1 "github.com/openshift/client-go/image/applyconfigurations/image/v1" + scheme "github.com/openshift/client-go/image/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + rest "k8s.io/client-go/rest" +) + +// ImageStreamMappingsGetter has a method to return a ImageStreamMappingInterface. +// A group's client should implement this interface. +type ImageStreamMappingsGetter interface { + ImageStreamMappings(namespace string) ImageStreamMappingInterface +} + +// ImageStreamMappingInterface has methods to work with ImageStreamMapping resources. +type ImageStreamMappingInterface interface { + Apply(ctx context.Context, imageStreamMapping *v1.ImageStreamMappingApplyConfiguration, opts metav1.ApplyOptions) (result *imagev1.ImageStreamMapping, err error) + Create(ctx context.Context, imageStreamMapping *imagev1.ImageStreamMapping, opts metav1.CreateOptions) (*metav1.Status, error) + + ImageStreamMappingExpansion +} + +// imageStreamMappings implements ImageStreamMappingInterface +type imageStreamMappings struct { + client rest.Interface + ns string +} + +// newImageStreamMappings returns a ImageStreamMappings +func newImageStreamMappings(c *ImageV1Client, namespace string) *imageStreamMappings { + return &imageStreamMappings{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied imageStreamMapping. +func (c *imageStreamMappings) Apply(ctx context.Context, imageStreamMapping *v1.ImageStreamMappingApplyConfiguration, opts metav1.ApplyOptions) (result *imagev1.ImageStreamMapping, err error) { + if imageStreamMapping == nil { + return nil, fmt.Errorf("imageStreamMapping provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(imageStreamMapping) + if err != nil { + return nil, err + } + name := imageStreamMapping.Name + if name == nil { + return nil, fmt.Errorf("imageStreamMapping.Name must be provided to Apply") + } + result = &imagev1.ImageStreamMapping{} + err = c.client.Patch(types.ApplyPatchType). + Namespace(c.ns). + Resource("imagestreammappings"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Create takes the representation of a imageStreamMapping and creates it. Returns the server's representation of the status, and an error, if there is any. +func (c *imageStreamMappings) Create(ctx context.Context, imageStreamMapping *imagev1.ImageStreamMapping, opts metav1.CreateOptions) (result *metav1.Status, err error) { + result = &metav1.Status{} + err = c.client.Post(). + Namespace(c.ns). + Resource("imagestreammappings"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(imageStreamMapping). + Do(ctx). 
+ Into(result) + return +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamtag.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamtag.go new file mode 100644 index 000000000..4ea36ccdc --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagestreamtag.go @@ -0,0 +1,111 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/openshift/api/image/v1" + scheme "github.com/openshift/client-go/image/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + rest "k8s.io/client-go/rest" +) + +// ImageStreamTagsGetter has a method to return a ImageStreamTagInterface. +// A group's client should implement this interface. +type ImageStreamTagsGetter interface { + ImageStreamTags(namespace string) ImageStreamTagInterface +} + +// ImageStreamTagInterface has methods to work with ImageStreamTag resources. +type ImageStreamTagInterface interface { + Create(ctx context.Context, imageStreamTag *v1.ImageStreamTag, opts metav1.CreateOptions) (*v1.ImageStreamTag, error) + Update(ctx context.Context, imageStreamTag *v1.ImageStreamTag, opts metav1.UpdateOptions) (*v1.ImageStreamTag, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ImageStreamTag, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ImageStreamTagList, error) + ImageStreamTagExpansion +} + +// imageStreamTags implements ImageStreamTagInterface +type imageStreamTags struct { + client rest.Interface + ns string +} + +// newImageStreamTags returns a ImageStreamTags +func newImageStreamTags(c *ImageV1Client, namespace string) *imageStreamTags { + return &imageStreamTags{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the imageStreamTag, and returns the corresponding imageStreamTag object, and an error if there is any. +func (c *imageStreamTags) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ImageStreamTag, err error) { + result = &v1.ImageStreamTag{} + err = c.client.Get(). + Namespace(c.ns). + Resource("imagestreamtags"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ImageStreamTags that match those selectors. +func (c *imageStreamTags) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ImageStreamTagList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ImageStreamTagList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("imagestreamtags"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Create takes the representation of a imageStreamTag and creates it. Returns the server's representation of the imageStreamTag, and an error, if there is any. +func (c *imageStreamTags) Create(ctx context.Context, imageStreamTag *v1.ImageStreamTag, opts metav1.CreateOptions) (result *v1.ImageStreamTag, err error) { + result = &v1.ImageStreamTag{} + err = c.client.Post(). + Namespace(c.ns). + Resource("imagestreamtags"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(imageStreamTag). + Do(ctx). 
+ Into(result) + return +} + +// Update takes the representation of a imageStreamTag and updates it. Returns the server's representation of the imageStreamTag, and an error, if there is any. +func (c *imageStreamTags) Update(ctx context.Context, imageStreamTag *v1.ImageStreamTag, opts metav1.UpdateOptions) (result *v1.ImageStreamTag, err error) { + result = &v1.ImageStreamTag{} + err = c.client.Put(). + Namespace(c.ns). + Resource("imagestreamtags"). + Name(imageStreamTag.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(imageStreamTag). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the imageStreamTag and deletes it. Returns an error if one occurs. +func (c *imageStreamTags) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("imagestreamtags"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} diff --git a/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagetag.go b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagetag.go new file mode 100644 index 000000000..a0d80e3ac --- /dev/null +++ b/vendor/github.com/openshift/client-go/image/clientset/versioned/typed/image/v1/imagetag.go @@ -0,0 +1,111 @@ +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + v1 "github.com/openshift/api/image/v1" + scheme "github.com/openshift/client-go/image/clientset/versioned/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + rest "k8s.io/client-go/rest" +) + +// ImageTagsGetter has a method to return a ImageTagInterface. +// A group's client should implement this interface. +type ImageTagsGetter interface { + ImageTags(namespace string) ImageTagInterface +} + +// ImageTagInterface has methods to work with ImageTag resources. +type ImageTagInterface interface { + Create(ctx context.Context, imageTag *v1.ImageTag, opts metav1.CreateOptions) (*v1.ImageTag, error) + Update(ctx context.Context, imageTag *v1.ImageTag, opts metav1.UpdateOptions) (*v1.ImageTag, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ImageTag, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.ImageTagList, error) + ImageTagExpansion +} + +// imageTags implements ImageTagInterface +type imageTags struct { + client rest.Interface + ns string +} + +// newImageTags returns a ImageTags +func newImageTags(c *ImageV1Client, namespace string) *imageTags { + return &imageTags{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the imageTag, and returns the corresponding imageTag object, and an error if there is any. +func (c *imageTags) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ImageTag, err error) { + result = &v1.ImageTag{} + err = c.client.Get(). + Namespace(c.ns). + Resource("imagetags"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ImageTags that match those selectors. +func (c *imageTags) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ImageTagList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ImageTagList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("imagetags"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Create takes the representation of a imageTag and creates it. Returns the server's representation of the imageTag, and an error, if there is any. +func (c *imageTags) Create(ctx context.Context, imageTag *v1.ImageTag, opts metav1.CreateOptions) (result *v1.ImageTag, err error) { + result = &v1.ImageTag{} + err = c.client.Post(). + Namespace(c.ns). + Resource("imagetags"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(imageTag). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a imageTag and updates it. Returns the server's representation of the imageTag, and an error, if there is any. +func (c *imageTags) Update(ctx context.Context, imageTag *v1.ImageTag, opts metav1.UpdateOptions) (result *v1.ImageTag, err error) { + result = &v1.ImageTag{} + err = c.client.Put(). + Namespace(c.ns). + Resource("imagetags"). + Name(imageTag.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(imageTag). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the imageTag and deletes it. Returns an error if one occurs. +func (c *imageTags) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("imagetags"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go index b6e78709e..fa14b6bfe 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/internal/internal.go @@ -139,6 +139,12 @@ var schemaYAML = typed.YAMLObject(`types: discriminatorValue: Modern - fieldName: old discriminatorValue: Old +- name: com.github.openshift.api.operator.v1.AWSCSIDriverConfigSpec + map: + fields: + - name: kmsKeyARN + type: + scalar: string - name: com.github.openshift.api.operator.v1.AWSClassicLoadBalancerParameters map: fields: @@ -300,6 +306,57 @@ var schemaYAML = typed.YAMLObject(`types: - name: version type: scalar: string +- name: com.github.openshift.api.operator.v1.AzureCSIDriverConfigSpec + map: + fields: + - name: diskEncryptionSet + type: + namedType: com.github.openshift.api.operator.v1.AzureDiskEncryptionSet +- name: com.github.openshift.api.operator.v1.AzureDiskEncryptionSet + map: + fields: + - name: name + type: + scalar: string + default: "" + - name: resourceGroup + type: + scalar: string + default: "" + - name: subscriptionID + type: + scalar: string + default: "" +- name: com.github.openshift.api.operator.v1.CSIDriverConfigSpec + map: + fields: + - name: aws + type: + namedType: com.github.openshift.api.operator.v1.AWSCSIDriverConfigSpec + - name: azure + type: + namedType: com.github.openshift.api.operator.v1.AzureCSIDriverConfigSpec + - name: driverType + type: + scalar: string + default: "" + - name: gcp + type: + namedType: com.github.openshift.api.operator.v1.GCPCSIDriverConfigSpec + - name: vSphere + type: + namedType: com.github.openshift.api.operator.v1.VSphereCSIDriverConfigSpec + unions: + - discriminator: driverType + fields: + - fieldName: aws + discriminatorValue: AWS + - fieldName: azure + discriminatorValue: Azure + - fieldName: gcp + discriminatorValue: GCP + - fieldName: vSphere + discriminatorValue: VSphere - name: 
com.github.openshift.api.operator.v1.CSISnapshotController map: fields: @@ -478,6 +535,10 @@ var schemaYAML = typed.YAMLObject(`types: - name: com.github.openshift.api.operator.v1.ClusterCSIDriverSpec map: fields: + - name: driverConfig + type: + namedType: com.github.openshift.api.operator.v1.CSIDriverConfigSpec + default: {} - name: logLevel type: scalar: string @@ -657,6 +718,14 @@ var schemaYAML = typed.YAMLObject(`types: - name: documentationBaseURL type: scalar: string + - name: perspectives + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.Perspective + elementRelationship: associative + keys: + - id - name: projectAccess type: namedType: com.github.openshift.api.operator.v1.ProjectAccess @@ -930,6 +999,36 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: com.github.openshift.api.operator.v1.DeveloperConsoleCatalogCategory elementRelationship: atomic + - name: types + type: + namedType: com.github.openshift.api.operator.v1.DeveloperConsoleCatalogTypes + default: {} +- name: com.github.openshift.api.operator.v1.DeveloperConsoleCatalogTypes + map: + fields: + - name: disabled + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: enabled + type: + list: + elementType: + scalar: string + elementRelationship: associative + - name: state + type: + scalar: string + default: Enabled + unions: + - discriminator: state + fields: + - fieldName: disabled + discriminatorValue: Disabled + - fieldName: enabled + discriminatorValue: Enabled - name: com.github.openshift.api.operator.v1.EgressIPConfig map: fields: @@ -1087,6 +1186,10 @@ var schemaYAML = typed.YAMLObject(`types: - name: policy type: scalar: string + - name: protocolStrategy + type: + scalar: string + default: "" - name: transportConfig type: namedType: com.github.openshift.api.operator.v1.DNSTransportConfig @@ -1097,6 +1200,30 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: atomic +- name: com.github.openshift.api.operator.v1.GCPCSIDriverConfigSpec + map: + fields: + - name: kmsKey + type: + namedType: com.github.openshift.api.operator.v1.GCPKMSKeyReference +- name: com.github.openshift.api.operator.v1.GCPKMSKeyReference + map: + fields: + - name: keyRing + type: + scalar: string + default: "" + - name: location + type: + scalar: string + - name: name + type: + scalar: string + default: "" + - name: projectID + type: + scalar: string + default: "" - name: com.github.openshift.api.operator.v1.GCPLoadBalancerParameters map: fields: @@ -1106,6 +1233,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: com.github.openshift.api.operator.v1.GatewayConfig map: fields: + - name: ipForwarding + type: + scalar: string - name: routingViaHost type: scalar: boolean @@ -1224,6 +1354,12 @@ var schemaYAML = typed.YAMLObject(`types: - name: hybridOverlayVXLANPort type: scalar: numeric +- name: com.github.openshift.api.operator.v1.IBMLoadBalancerParameters + map: + fields: + - name: protocol + type: + scalar: string - name: com.github.openshift.api.operator.v1.IPAMConfig map: fields: @@ -1570,6 +1706,10 @@ var schemaYAML = typed.YAMLObject(`types: - name: com.github.openshift.api.operator.v1.InsightsReport map: fields: + - name: downloadedAt + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + default: {} - name: healthChecks type: list: @@ -1664,6 +1804,12 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: numeric default: 0 + - name: serviceAccountIssuers + type: + list: + elementType: + 
namedType: com.github.openshift.api.operator.v1.ServiceAccountIssuerStatus + elementRelationship: atomic - name: version type: scalar: string @@ -1950,10 +2096,16 @@ var schemaYAML = typed.YAMLObject(`types: - name: com.github.openshift.api.operator.v1.LoadBalancerStrategy map: fields: + - name: allowedSourceRanges + type: + list: + elementType: + scalar: string + elementRelationship: atomic - name: dnsManagementPolicy type: scalar: string - default: "" + default: Managed - name: providerParameters type: namedType: com.github.openshift.api.operator.v1.ProviderLoadBalancerParameters @@ -2398,6 +2550,53 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: com.github.openshift.api.operator.v1.Perspective + map: + fields: + - name: id + type: + scalar: string + default: "" + - name: pinnedResources + type: + list: + elementType: + namedType: com.github.openshift.api.operator.v1.PinnedResourceReference + elementRelationship: atomic + - name: visibility + type: + namedType: com.github.openshift.api.operator.v1.PerspectiveVisibility + default: {} +- name: com.github.openshift.api.operator.v1.PerspectiveVisibility + map: + fields: + - name: accessReview + type: + namedType: com.github.openshift.api.operator.v1.ResourceAttributesAccessReview + - name: state + type: + scalar: string + default: "" + unions: + - discriminator: state + fields: + - fieldName: accessReview + discriminatorValue: AccessReview +- name: com.github.openshift.api.operator.v1.PinnedResourceReference + map: + fields: + - name: group + type: + scalar: string + default: "" + - name: resource + type: + scalar: string + default: "" + - name: version + type: + scalar: string + default: "" - name: com.github.openshift.api.operator.v1.PolicyAuditConfig map: fields: @@ -2407,6 +2606,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: maxFileSize type: scalar: numeric + - name: maxLogFiles + type: + scalar: numeric - name: rateLimit type: scalar: numeric @@ -2437,6 +2639,9 @@ var schemaYAML = typed.YAMLObject(`types: - name: gcp type: namedType: com.github.openshift.api.operator.v1.GCPLoadBalancerParameters + - name: ibm + type: + namedType: com.github.openshift.api.operator.v1.IBMLoadBalancerParameters - name: type type: scalar: string @@ -2448,6 +2653,8 @@ var schemaYAML = typed.YAMLObject(`types: discriminatorValue: AWS - fieldName: gcp discriminatorValue: GCP + - fieldName: ibm + discriminatorValue: IBM - name: com.github.openshift.api.operator.v1.ProxyConfig map: fields: @@ -2474,6 +2681,21 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: atomic +- name: com.github.openshift.api.operator.v1.ResourceAttributesAccessReview + map: + fields: + - name: missing + type: + list: + elementType: + namedType: io.k8s.api.authorization.v1.ResourceAttributes + elementRelationship: atomic + - name: required + type: + list: + elementType: + namedType: io.k8s.api.authorization.v1.ResourceAttributes + elementRelationship: atomic - name: com.github.openshift.api.operator.v1.RouteAdmissionPolicy map: fields: @@ -2509,6 +2731,16 @@ var schemaYAML = typed.YAMLObject(`types: elementType: scalar: string elementRelationship: atomic +- name: com.github.openshift.api.operator.v1.ServiceAccountIssuerStatus + map: + fields: + - name: expirationTime + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Time + - name: name + type: + scalar: string + default: "" - name: com.github.openshift.api.operator.v1.ServiceCA map: fields: @@ -2830,6 +3062,10 @@ var schemaYAML = 
typed.YAMLObject(`types: type: namedType: __untyped_atomic_ default: {} + - name: vsphereStorageDriver + type: + scalar: string + default: "" - name: com.github.openshift.api.operator.v1.StorageStatus map: fields: @@ -2891,6 +3127,10 @@ var schemaYAML = typed.YAMLObject(`types: - name: policy type: scalar: string + - name: protocolStrategy + type: + scalar: string + default: "" - name: transportConfig type: namedType: com.github.openshift.api.operator.v1.DNSTransportConfig @@ -2901,6 +3141,15 @@ var schemaYAML = typed.YAMLObject(`types: elementType: namedType: com.github.openshift.api.operator.v1.Upstream elementRelationship: atomic +- name: com.github.openshift.api.operator.v1.VSphereCSIDriverConfigSpec + map: + fields: + - name: topologyCategories + type: + list: + elementType: + scalar: string + elementRelationship: atomic - name: com.github.openshift.api.operator.v1alpha1.ImageContentSourcePolicy map: fields: @@ -2940,6 +3189,30 @@ var schemaYAML = typed.YAMLObject(`types: type: scalar: string default: "" +- name: io.k8s.api.authorization.v1.ResourceAttributes + map: + fields: + - name: group + type: + scalar: string + - name: name + type: + scalar: string + - name: namespace + type: + scalar: string + - name: resource + type: + scalar: string + - name: subresource + type: + scalar: string + - name: verb + type: + scalar: string + - name: version + type: + scalar: string - name: io.k8s.api.core.v1.LocalObjectReference map: fields: diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awscsidriverconfigspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awscsidriverconfigspec.go new file mode 100644 index 000000000..7a4fa7ab3 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/awscsidriverconfigspec.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// AWSCSIDriverConfigSpecApplyConfiguration represents an declarative configuration of the AWSCSIDriverConfigSpec type for use +// with apply. +type AWSCSIDriverConfigSpecApplyConfiguration struct { + KMSKeyARN *string `json:"kmsKeyARN,omitempty"` +} + +// AWSCSIDriverConfigSpecApplyConfiguration constructs an declarative configuration of the AWSCSIDriverConfigSpec type for use with +// apply. +func AWSCSIDriverConfigSpec() *AWSCSIDriverConfigSpecApplyConfiguration { + return &AWSCSIDriverConfigSpecApplyConfiguration{} +} + +// WithKMSKeyARN sets the KMSKeyARN field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the KMSKeyARN field is set to the value of the last call. +func (b *AWSCSIDriverConfigSpecApplyConfiguration) WithKMSKeyARN(value string) *AWSCSIDriverConfigSpecApplyConfiguration { + b.KMSKeyARN = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/azurecsidriverconfigspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/azurecsidriverconfigspec.go new file mode 100644 index 000000000..46366804c --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/azurecsidriverconfigspec.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
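Aside (not part of the patch): every generated file added below follows the same apply-configuration shape — a struct of optional pointer fields plus chainable `With*` setters intended for server-side apply. A minimal sketch of the new `AWSCSIDriverConfigSpec` builder, assuming the import alias below and a hypothetical KMS ARN:

	package main

	import (
		"fmt"

		applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
	)

	func main() {
		// Each With* setter stores a pointer and returns the receiver,
		// so optional fields compose by chaining.
		awsCfg := applyoperatorv1.AWSCSIDriverConfigSpec().
			WithKMSKeyARN("arn:aws:kms:us-east-1:111122223333:key/example") // hypothetical ARN
		fmt.Println(*awsCfg.KMSKeyARN)
	}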
+ +package v1 + +// AzureCSIDriverConfigSpecApplyConfiguration represents an declarative configuration of the AzureCSIDriverConfigSpec type for use +// with apply. +type AzureCSIDriverConfigSpecApplyConfiguration struct { + DiskEncryptionSet *AzureDiskEncryptionSetApplyConfiguration `json:"diskEncryptionSet,omitempty"` +} + +// AzureCSIDriverConfigSpecApplyConfiguration constructs an declarative configuration of the AzureCSIDriverConfigSpec type for use with +// apply. +func AzureCSIDriverConfigSpec() *AzureCSIDriverConfigSpecApplyConfiguration { + return &AzureCSIDriverConfigSpecApplyConfiguration{} +} + +// WithDiskEncryptionSet sets the DiskEncryptionSet field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DiskEncryptionSet field is set to the value of the last call. +func (b *AzureCSIDriverConfigSpecApplyConfiguration) WithDiskEncryptionSet(value *AzureDiskEncryptionSetApplyConfiguration) *AzureCSIDriverConfigSpecApplyConfiguration { + b.DiskEncryptionSet = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/azurediskencryptionset.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/azurediskencryptionset.go new file mode 100644 index 000000000..832417011 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/azurediskencryptionset.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// AzureDiskEncryptionSetApplyConfiguration represents an declarative configuration of the AzureDiskEncryptionSet type for use +// with apply. +type AzureDiskEncryptionSetApplyConfiguration struct { + SubscriptionID *string `json:"subscriptionID,omitempty"` + ResourceGroup *string `json:"resourceGroup,omitempty"` + Name *string `json:"name,omitempty"` +} + +// AzureDiskEncryptionSetApplyConfiguration constructs an declarative configuration of the AzureDiskEncryptionSet type for use with +// apply. +func AzureDiskEncryptionSet() *AzureDiskEncryptionSetApplyConfiguration { + return &AzureDiskEncryptionSetApplyConfiguration{} +} + +// WithSubscriptionID sets the SubscriptionID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SubscriptionID field is set to the value of the last call. +func (b *AzureDiskEncryptionSetApplyConfiguration) WithSubscriptionID(value string) *AzureDiskEncryptionSetApplyConfiguration { + b.SubscriptionID = &value + return b +} + +// WithResourceGroup sets the ResourceGroup field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceGroup field is set to the value of the last call. +func (b *AzureDiskEncryptionSetApplyConfiguration) WithResourceGroup(value string) *AzureDiskEncryptionSetApplyConfiguration { + b.ResourceGroup = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. 
+func (b *AzureDiskEncryptionSetApplyConfiguration) WithName(value string) *AzureDiskEncryptionSetApplyConfiguration { + b.Name = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriverspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriverspec.go index b6f8495d5..9cd40d258 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriverspec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/clustercsidriverspec.go @@ -11,7 +11,8 @@ import ( // with apply. type ClusterCSIDriverSpecApplyConfiguration struct { OperatorSpecApplyConfiguration `json:",inline"` - StorageClassState *operatorv1.StorageClassStateName `json:"storageClassState,omitempty"` + StorageClassState *operatorv1.StorageClassStateName `json:"storageClassState,omitempty"` + DriverConfig *CSIDriverConfigSpecApplyConfiguration `json:"driverConfig,omitempty"` } // ClusterCSIDriverSpecApplyConfiguration constructs an declarative configuration of the ClusterCSIDriverSpec type for use with @@ -67,3 +68,11 @@ func (b *ClusterCSIDriverSpecApplyConfiguration) WithStorageClassState(value ope b.StorageClassState = &value return b } + +// WithDriverConfig sets the DriverConfig field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DriverConfig field is set to the value of the last call. +func (b *ClusterCSIDriverSpecApplyConfiguration) WithDriverConfig(value *CSIDriverConfigSpecApplyConfiguration) *ClusterCSIDriverSpecApplyConfiguration { + b.DriverConfig = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consolecustomization.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consolecustomization.go index a9c4bcfd8..c2d95e2b9 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consolecustomization.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/consolecustomization.go @@ -18,6 +18,7 @@ type ConsoleCustomizationApplyConfiguration struct { ProjectAccess *ProjectAccessApplyConfiguration `json:"projectAccess,omitempty"` QuickStarts *QuickStartsApplyConfiguration `json:"quickStarts,omitempty"` AddPage *AddPageApplyConfiguration `json:"addPage,omitempty"` + Perspectives []PerspectiveApplyConfiguration `json:"perspectives,omitempty"` } // ConsoleCustomizationApplyConfiguration constructs an declarative configuration of the ConsoleCustomization type for use with @@ -89,3 +90,16 @@ func (b *ConsoleCustomizationApplyConfiguration) WithAddPage(value *AddPageApply b.AddPage = value return b } + +// WithPerspectives adds the given value to the Perspectives field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Perspectives field. 
+func (b *ConsoleCustomizationApplyConfiguration) WithPerspectives(values ...*PerspectiveApplyConfiguration) *ConsoleCustomizationApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithPerspectives") + } + b.Perspectives = append(b.Perspectives, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csidriverconfigspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csidriverconfigspec.go new file mode 100644 index 000000000..6b5236e98 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/csidriverconfigspec.go @@ -0,0 +1,63 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/operator/v1" +) + +// CSIDriverConfigSpecApplyConfiguration represents an declarative configuration of the CSIDriverConfigSpec type for use +// with apply. +type CSIDriverConfigSpecApplyConfiguration struct { + DriverType *v1.CSIDriverType `json:"driverType,omitempty"` + AWS *AWSCSIDriverConfigSpecApplyConfiguration `json:"aws,omitempty"` + Azure *AzureCSIDriverConfigSpecApplyConfiguration `json:"azure,omitempty"` + GCP *GCPCSIDriverConfigSpecApplyConfiguration `json:"gcp,omitempty"` + VSphere *VSphereCSIDriverConfigSpecApplyConfiguration `json:"vSphere,omitempty"` +} + +// CSIDriverConfigSpecApplyConfiguration constructs an declarative configuration of the CSIDriverConfigSpec type for use with +// apply. +func CSIDriverConfigSpec() *CSIDriverConfigSpecApplyConfiguration { + return &CSIDriverConfigSpecApplyConfiguration{} +} + +// WithDriverType sets the DriverType field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DriverType field is set to the value of the last call. +func (b *CSIDriverConfigSpecApplyConfiguration) WithDriverType(value v1.CSIDriverType) *CSIDriverConfigSpecApplyConfiguration { + b.DriverType = &value + return b +} + +// WithAWS sets the AWS field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AWS field is set to the value of the last call. +func (b *CSIDriverConfigSpecApplyConfiguration) WithAWS(value *AWSCSIDriverConfigSpecApplyConfiguration) *CSIDriverConfigSpecApplyConfiguration { + b.AWS = value + return b +} + +// WithAzure sets the Azure field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Azure field is set to the value of the last call. +func (b *CSIDriverConfigSpecApplyConfiguration) WithAzure(value *AzureCSIDriverConfigSpecApplyConfiguration) *CSIDriverConfigSpecApplyConfiguration { + b.Azure = value + return b +} + +// WithGCP sets the GCP field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GCP field is set to the value of the last call. 
+func (b *CSIDriverConfigSpecApplyConfiguration) WithGCP(value *GCPCSIDriverConfigSpecApplyConfiguration) *CSIDriverConfigSpecApplyConfiguration { + b.GCP = value + return b +} + +// WithVSphere sets the VSphere field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the VSphere field is set to the value of the last call. +func (b *CSIDriverConfigSpecApplyConfiguration) WithVSphere(value *VSphereCSIDriverConfigSpecApplyConfiguration) *CSIDriverConfigSpecApplyConfiguration { + b.VSphere = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogcustomization.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogcustomization.go index e040d5eef..6bb4f21eb 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogcustomization.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogcustomization.go @@ -6,6 +6,7 @@ package v1 // with apply. type DeveloperConsoleCatalogCustomizationApplyConfiguration struct { Categories []DeveloperConsoleCatalogCategoryApplyConfiguration `json:"categories,omitempty"` + Types *DeveloperConsoleCatalogTypesApplyConfiguration `json:"types,omitempty"` } // DeveloperConsoleCatalogCustomizationApplyConfiguration constructs an declarative configuration of the DeveloperConsoleCatalogCustomization type for use with @@ -26,3 +27,11 @@ func (b *DeveloperConsoleCatalogCustomizationApplyConfiguration) WithCategories( } return b } + +// WithTypes sets the Types field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Types field is set to the value of the last call. +func (b *DeveloperConsoleCatalogCustomizationApplyConfiguration) WithTypes(value *DeveloperConsoleCatalogTypesApplyConfiguration) *DeveloperConsoleCatalogCustomizationApplyConfiguration { + b.Types = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogtypes.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogtypes.go new file mode 100644 index 000000000..de08f953e --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/developerconsolecatalogtypes.go @@ -0,0 +1,45 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/operator/v1" +) + +// DeveloperConsoleCatalogTypesApplyConfiguration represents an declarative configuration of the DeveloperConsoleCatalogTypes type for use +// with apply. +type DeveloperConsoleCatalogTypesApplyConfiguration struct { + State *v1.CatalogTypesState `json:"state,omitempty"` + Enabled *[]string `json:"enabled,omitempty"` + Disabled *[]string `json:"disabled,omitempty"` +} + +// DeveloperConsoleCatalogTypesApplyConfiguration constructs an declarative configuration of the DeveloperConsoleCatalogTypes type for use with +// apply. 
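Aside: `CSIDriverConfigSpec` is a discriminated union — `driverType` selects which per-cloud member is honored — and the new `WithDriverConfig` hook on `ClusterCSIDriverSpec` wires it in. A hedged sketch; the constant name `operatorv1.AWSDriverType` is assumed from github.com/openshift/api, and the ARN is hypothetical:

	package main

	import (
		operatorv1 "github.com/openshift/api/operator/v1"
		applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
	)

	func main() {
		// driverType acts as the union discriminator; only the matching
		// per-cloud member (here AWS) is meaningful.
		_ = applyoperatorv1.ClusterCSIDriverSpec().
			WithDriverConfig(applyoperatorv1.CSIDriverConfigSpec().
				WithDriverType(operatorv1.AWSDriverType). // assumed constant
				WithAWS(applyoperatorv1.AWSCSIDriverConfigSpec().
					WithKMSKeyARN("arn:aws:kms:us-east-1:111122223333:key/example"))) // hypothetical
	}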
+func DeveloperConsoleCatalogTypes() *DeveloperConsoleCatalogTypesApplyConfiguration { + return &DeveloperConsoleCatalogTypesApplyConfiguration{} +} + +// WithState sets the State field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the State field is set to the value of the last call. +func (b *DeveloperConsoleCatalogTypesApplyConfiguration) WithState(value v1.CatalogTypesState) *DeveloperConsoleCatalogTypesApplyConfiguration { + b.State = &value + return b +} + +// WithEnabled sets the Enabled field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Enabled field is set to the value of the last call. +func (b *DeveloperConsoleCatalogTypesApplyConfiguration) WithEnabled(value []string) *DeveloperConsoleCatalogTypesApplyConfiguration { + b.Enabled = &value + return b +} + +// WithDisabled sets the Disabled field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Disabled field is set to the value of the last call. +func (b *DeveloperConsoleCatalogTypesApplyConfiguration) WithDisabled(value []string) *DeveloperConsoleCatalogTypesApplyConfiguration { + b.Disabled = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/forwardplugin.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/forwardplugin.go index ebf8371bf..8d8ef6bc8 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/forwardplugin.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/forwardplugin.go @@ -9,9 +9,10 @@ import ( // ForwardPluginApplyConfiguration represents an declarative configuration of the ForwardPlugin type for use // with apply. type ForwardPluginApplyConfiguration struct { - Upstreams []string `json:"upstreams,omitempty"` - Policy *v1.ForwardingPolicy `json:"policy,omitempty"` - TransportConfig *DNSTransportConfigApplyConfiguration `json:"transportConfig,omitempty"` + Upstreams []string `json:"upstreams,omitempty"` + Policy *v1.ForwardingPolicy `json:"policy,omitempty"` + TransportConfig *DNSTransportConfigApplyConfiguration `json:"transportConfig,omitempty"` + ProtocolStrategy *v1.ProtocolStrategy `json:"protocolStrategy,omitempty"` } // ForwardPluginApplyConfiguration constructs an declarative configuration of the ForwardPlugin type for use with @@ -45,3 +46,11 @@ func (b *ForwardPluginApplyConfiguration) WithTransportConfig(value *DNSTranspor b.TransportConfig = value return b } + +// WithProtocolStrategy sets the ProtocolStrategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProtocolStrategy field is set to the value of the last call. 
+func (b *ForwardPluginApplyConfiguration) WithProtocolStrategy(value v1.ProtocolStrategy) *ForwardPluginApplyConfiguration { + b.ProtocolStrategy = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gatewayconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gatewayconfig.go index 7868d0ec7..269aaf91c 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gatewayconfig.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gatewayconfig.go @@ -2,10 +2,15 @@ package v1 +import ( + v1 "github.com/openshift/api/operator/v1" +) + // GatewayConfigApplyConfiguration represents an declarative configuration of the GatewayConfig type for use // with apply. type GatewayConfigApplyConfiguration struct { - RoutingViaHost *bool `json:"routingViaHost,omitempty"` + RoutingViaHost *bool `json:"routingViaHost,omitempty"` + IPForwarding *v1.IPForwardingMode `json:"ipForwarding,omitempty"` } // GatewayConfigApplyConfiguration constructs an declarative configuration of the GatewayConfig type for use with @@ -21,3 +26,11 @@ func (b *GatewayConfigApplyConfiguration) WithRoutingViaHost(value bool) *Gatewa b.RoutingViaHost = &value return b } + +// WithIPForwarding sets the IPForwarding field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IPForwarding field is set to the value of the last call. +func (b *GatewayConfigApplyConfiguration) WithIPForwarding(value v1.IPForwardingMode) *GatewayConfigApplyConfiguration { + b.IPForwarding = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gcpcsidriverconfigspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gcpcsidriverconfigspec.go new file mode 100644 index 000000000..c0f596273 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gcpcsidriverconfigspec.go @@ -0,0 +1,23 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// GCPCSIDriverConfigSpecApplyConfiguration represents an declarative configuration of the GCPCSIDriverConfigSpec type for use +// with apply. +type GCPCSIDriverConfigSpecApplyConfiguration struct { + KMSKey *GCPKMSKeyReferenceApplyConfiguration `json:"kmsKey,omitempty"` +} + +// GCPCSIDriverConfigSpecApplyConfiguration constructs an declarative configuration of the GCPCSIDriverConfigSpec type for use with +// apply. +func GCPCSIDriverConfigSpec() *GCPCSIDriverConfigSpecApplyConfiguration { + return &GCPCSIDriverConfigSpecApplyConfiguration{} +} + +// WithKMSKey sets the KMSKey field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the KMSKey field is set to the value of the last call. 
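Aside: the new `ipForwarding` field rides alongside `routingViaHost` on the OVN-Kubernetes gateway config. A hedged sketch; the constant name `operatorv1.IPForwardingGlobal` is assumed from github.com/openshift/api:

	package main

	import (
		operatorv1 "github.com/openshift/api/operator/v1"
		applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
	)

	func main() {
		// Both gateway knobs are optional; unset fields are simply
		// omitted from the apply payload.
		_ = applyoperatorv1.GatewayConfig().
			WithRoutingViaHost(true).
			WithIPForwarding(operatorv1.IPForwardingGlobal) // assumed constant
	}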
+func (b *GCPCSIDriverConfigSpecApplyConfiguration) WithKMSKey(value *GCPKMSKeyReferenceApplyConfiguration) *GCPCSIDriverConfigSpecApplyConfiguration { + b.KMSKey = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gcpkmskeyreference.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gcpkmskeyreference.go new file mode 100644 index 000000000..64fc0fd47 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/gcpkmskeyreference.go @@ -0,0 +1,50 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// GCPKMSKeyReferenceApplyConfiguration represents an declarative configuration of the GCPKMSKeyReference type for use +// with apply. +type GCPKMSKeyReferenceApplyConfiguration struct { + Name *string `json:"name,omitempty"` + KeyRing *string `json:"keyRing,omitempty"` + ProjectID *string `json:"projectID,omitempty"` + Location *string `json:"location,omitempty"` +} + +// GCPKMSKeyReferenceApplyConfiguration constructs an declarative configuration of the GCPKMSKeyReference type for use with +// apply. +func GCPKMSKeyReference() *GCPKMSKeyReferenceApplyConfiguration { + return &GCPKMSKeyReferenceApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *GCPKMSKeyReferenceApplyConfiguration) WithName(value string) *GCPKMSKeyReferenceApplyConfiguration { + b.Name = &value + return b +} + +// WithKeyRing sets the KeyRing field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the KeyRing field is set to the value of the last call. +func (b *GCPKMSKeyReferenceApplyConfiguration) WithKeyRing(value string) *GCPKMSKeyReferenceApplyConfiguration { + b.KeyRing = &value + return b +} + +// WithProjectID sets the ProjectID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProjectID field is set to the value of the last call. +func (b *GCPKMSKeyReferenceApplyConfiguration) WithProjectID(value string) *GCPKMSKeyReferenceApplyConfiguration { + b.ProjectID = &value + return b +} + +// WithLocation sets the Location field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Location field is set to the value of the last call. +func (b *GCPKMSKeyReferenceApplyConfiguration) WithLocation(value string) *GCPKMSKeyReferenceApplyConfiguration { + b.Location = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ibmloadbalancerparameters.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ibmloadbalancerparameters.go new file mode 100644 index 000000000..595b5cb51 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/ibmloadbalancerparameters.go @@ -0,0 +1,27 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +import ( + v1 "github.com/openshift/api/operator/v1" +) + +// IBMLoadBalancerParametersApplyConfiguration represents an declarative configuration of the IBMLoadBalancerParameters type for use +// with apply. +type IBMLoadBalancerParametersApplyConfiguration struct { + Protocol *v1.IngressControllerProtocol `json:"protocol,omitempty"` +} + +// IBMLoadBalancerParametersApplyConfiguration constructs an declarative configuration of the IBMLoadBalancerParameters type for use with +// apply. +func IBMLoadBalancerParameters() *IBMLoadBalancerParametersApplyConfiguration { + return &IBMLoadBalancerParametersApplyConfiguration{} +} + +// WithProtocol sets the Protocol field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Protocol field is set to the value of the last call. +func (b *IBMLoadBalancerParametersApplyConfiguration) WithProtocol(value v1.IngressControllerProtocol) *IBMLoadBalancerParametersApplyConfiguration { + b.Protocol = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsreport.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsreport.go index d2267250c..6c311be02 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsreport.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/insightsreport.go @@ -2,9 +2,14 @@ package v1 +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + // InsightsReportApplyConfiguration represents an declarative configuration of the InsightsReport type for use // with apply. type InsightsReportApplyConfiguration struct { + DownloadedAt *v1.Time `json:"downloadedAt,omitempty"` HealthChecks []HealthCheckApplyConfiguration `json:"healthChecks,omitempty"` } @@ -14,6 +19,14 @@ func InsightsReport() *InsightsReportApplyConfiguration { return &InsightsReportApplyConfiguration{} } +// WithDownloadedAt sets the DownloadedAt field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DownloadedAt field is set to the value of the last call. +func (b *InsightsReportApplyConfiguration) WithDownloadedAt(value v1.Time) *InsightsReportApplyConfiguration { + b.DownloadedAt = &value + return b +} + // WithHealthChecks adds the given value to the HealthChecks field in the declarative configuration // and returns the receiver, so that objects can be build by chaining "With" function invocations. // If called multiple times, values provided by each call will be appended to the HealthChecks field. diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserverstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserverstatus.go index 36c608fa5..36475f5f4 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserverstatus.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/kubeapiserverstatus.go @@ -6,6 +6,7 @@ package v1 // with apply. 
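Aside: `InsightsReport` gains a `downloadedAt` timestamp typed as `metav1.Time`, matching the `io.k8s.apimachinery.pkg.apis.meta.v1.Time` entry in the schema above. A minimal sketch using `metav1.Now()`, which yields a `metav1.Time`:

	package main

	import (
		applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	)

	func main() {
		// downloadedAt records when the Insights report was fetched.
		_ = applyoperatorv1.InsightsReport().
			WithDownloadedAt(metav1.Now())
	}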
type KubeAPIServerStatusApplyConfiguration struct { StaticPodOperatorStatusApplyConfiguration `json:",inline"` + ServiceAccountIssuers []ServiceAccountIssuerStatusApplyConfiguration `json:"serviceAccountIssuers,omitempty"` } // KubeAPIServerStatusApplyConfiguration constructs an declarative configuration of the KubeAPIServerStatus type for use with @@ -92,3 +93,16 @@ func (b *KubeAPIServerStatusApplyConfiguration) WithNodeStatuses(values ...*Node } return b } + +// WithServiceAccountIssuers adds the given value to the ServiceAccountIssuers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the ServiceAccountIssuers field. +func (b *KubeAPIServerStatusApplyConfiguration) WithServiceAccountIssuers(values ...*ServiceAccountIssuerStatusApplyConfiguration) *KubeAPIServerStatusApplyConfiguration { + for i := range values { + if values[i] == nil { + panic("nil value passed to WithServiceAccountIssuers") + } + b.ServiceAccountIssuers = append(b.ServiceAccountIssuers, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/loadbalancerstrategy.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/loadbalancerstrategy.go index 088fa6724..d91dfb494 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/loadbalancerstrategy.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/loadbalancerstrategy.go @@ -10,6 +10,7 @@ import ( // with apply. type LoadBalancerStrategyApplyConfiguration struct { Scope *v1.LoadBalancerScope `json:"scope,omitempty"` + AllowedSourceRanges []v1.CIDR `json:"allowedSourceRanges,omitempty"` ProviderParameters *ProviderLoadBalancerParametersApplyConfiguration `json:"providerParameters,omitempty"` DNSManagementPolicy *v1.LoadBalancerDNSManagementPolicy `json:"dnsManagementPolicy,omitempty"` } @@ -28,6 +29,16 @@ func (b *LoadBalancerStrategyApplyConfiguration) WithScope(value v1.LoadBalancer return b } +// WithAllowedSourceRanges adds the given value to the AllowedSourceRanges field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the AllowedSourceRanges field. +func (b *LoadBalancerStrategyApplyConfiguration) WithAllowedSourceRanges(values ...v1.CIDR) *LoadBalancerStrategyApplyConfiguration { + for i := range values { + b.AllowedSourceRanges = append(b.AllowedSourceRanges, values[i]) + } + return b +} + // WithProviderParameters sets the ProviderParameters field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the ProviderParameters field is set to the value of the last call. diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/perspective.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/perspective.go new file mode 100644 index 000000000..d030b7f35 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/perspective.go @@ -0,0 +1,53 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
+ +package v1 + +// PerspectiveApplyConfiguration represents an declarative configuration of the Perspective type for use +// with apply. +type PerspectiveApplyConfiguration struct { + ID *string `json:"id,omitempty"` + Visibility *PerspectiveVisibilityApplyConfiguration `json:"visibility,omitempty"` + PinnedResources *[]PinnedResourceReferenceApplyConfiguration `json:"pinnedResources,omitempty"` +} + +// PerspectiveApplyConfiguration constructs an declarative configuration of the Perspective type for use with +// apply. +func Perspective() *PerspectiveApplyConfiguration { + return &PerspectiveApplyConfiguration{} +} + +// WithID sets the ID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ID field is set to the value of the last call. +func (b *PerspectiveApplyConfiguration) WithID(value string) *PerspectiveApplyConfiguration { + b.ID = &value + return b +} + +// WithVisibility sets the Visibility field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Visibility field is set to the value of the last call. +func (b *PerspectiveApplyConfiguration) WithVisibility(value *PerspectiveVisibilityApplyConfiguration) *PerspectiveApplyConfiguration { + b.Visibility = value + return b +} + +func (b *PerspectiveApplyConfiguration) ensurePinnedResourceReferenceApplyConfigurationExists() { + if b.PinnedResources == nil { + b.PinnedResources = &[]PinnedResourceReferenceApplyConfiguration{} + } +} + +// WithPinnedResources adds the given value to the PinnedResources field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the PinnedResources field. +func (b *PerspectiveApplyConfiguration) WithPinnedResources(values ...*PinnedResourceReferenceApplyConfiguration) *PerspectiveApplyConfiguration { + b.ensurePinnedResourceReferenceApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithPinnedResources") + } + *b.PinnedResources = append(*b.PinnedResources, *values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/perspectivevisibility.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/perspectivevisibility.go new file mode 100644 index 000000000..803ab87ef --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/perspectivevisibility.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "github.com/openshift/api/operator/v1" +) + +// PerspectiveVisibilityApplyConfiguration represents an declarative configuration of the PerspectiveVisibility type for use +// with apply. +type PerspectiveVisibilityApplyConfiguration struct { + State *v1.PerspectiveState `json:"state,omitempty"` + AccessReview *ResourceAttributesAccessReviewApplyConfiguration `json:"accessReview,omitempty"` +} + +// PerspectiveVisibilityApplyConfiguration constructs an declarative configuration of the PerspectiveVisibility type for use with +// apply. 
+func PerspectiveVisibility() *PerspectiveVisibilityApplyConfiguration { + return &PerspectiveVisibilityApplyConfiguration{} +} + +// WithState sets the State field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the State field is set to the value of the last call. +func (b *PerspectiveVisibilityApplyConfiguration) WithState(value v1.PerspectiveState) *PerspectiveVisibilityApplyConfiguration { + b.State = &value + return b +} + +// WithAccessReview sets the AccessReview field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the AccessReview field is set to the value of the last call. +func (b *PerspectiveVisibilityApplyConfiguration) WithAccessReview(value *ResourceAttributesAccessReviewApplyConfiguration) *PerspectiveVisibilityApplyConfiguration { + b.AccessReview = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/pinnedresourcereference.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/pinnedresourcereference.go new file mode 100644 index 000000000..8dffc79a5 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/pinnedresourcereference.go @@ -0,0 +1,41 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// PinnedResourceReferenceApplyConfiguration represents an declarative configuration of the PinnedResourceReference type for use +// with apply. +type PinnedResourceReferenceApplyConfiguration struct { + Group *string `json:"group,omitempty"` + Version *string `json:"version,omitempty"` + Resource *string `json:"resource,omitempty"` +} + +// PinnedResourceReferenceApplyConfiguration constructs an declarative configuration of the PinnedResourceReference type for use with +// apply. +func PinnedResourceReference() *PinnedResourceReferenceApplyConfiguration { + return &PinnedResourceReferenceApplyConfiguration{} +} + +// WithGroup sets the Group field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Group field is set to the value of the last call. +func (b *PinnedResourceReferenceApplyConfiguration) WithGroup(value string) *PinnedResourceReferenceApplyConfiguration { + b.Group = &value + return b +} + +// WithVersion sets the Version field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Version field is set to the value of the last call. +func (b *PinnedResourceReferenceApplyConfiguration) WithVersion(value string) *PinnedResourceReferenceApplyConfiguration { + b.Version = &value + return b +} + +// WithResource sets the Resource field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Resource field is set to the value of the last call. 
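Aside: a console `Perspective` is keyed by `id` (the schema above marks the list associative on `id`), carries a union-style `visibility`, and an atomic list of pinned resources. A hedged sketch; the id is hypothetical and the constant name `operatorv1.PerspectiveEnabled` is assumed from github.com/openshift/api:

	package main

	import (
		operatorv1 "github.com/openshift/api/operator/v1"
		applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
	)

	func main() {
		// Entries merge by id under server-side apply; pinnedResources
		// is replaced wholesale (atomic).
		_ = applyoperatorv1.Perspective().
			WithID("dev"). // hypothetical perspective id
			WithVisibility(applyoperatorv1.PerspectiveVisibility().
				WithState(operatorv1.PerspectiveEnabled)). // assumed constant
			WithPinnedResources(applyoperatorv1.PinnedResourceReference().
				WithGroup("").WithVersion("v1").WithResource("configmaps"))
	}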
+func (b *PinnedResourceReferenceApplyConfiguration) WithResource(value string) *PinnedResourceReferenceApplyConfiguration { + b.Resource = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/policyauditconfig.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/policyauditconfig.go index 22223436d..0e941459c 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/policyauditconfig.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/policyauditconfig.go @@ -7,6 +7,7 @@ package v1 type PolicyAuditConfigApplyConfiguration struct { RateLimit *uint32 `json:"rateLimit,omitempty"` MaxFileSize *uint32 `json:"maxFileSize,omitempty"` + MaxLogFiles *int32 `json:"maxLogFiles,omitempty"` Destination *string `json:"destination,omitempty"` SyslogFacility *string `json:"syslogFacility,omitempty"` } @@ -33,6 +34,14 @@ func (b *PolicyAuditConfigApplyConfiguration) WithMaxFileSize(value uint32) *Pol return b } +// WithMaxLogFiles sets the MaxLogFiles field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the MaxLogFiles field is set to the value of the last call. +func (b *PolicyAuditConfigApplyConfiguration) WithMaxLogFiles(value int32) *PolicyAuditConfigApplyConfiguration { + b.MaxLogFiles = &value + return b +} + // WithDestination sets the Destination field in the declarative configuration to the given value // and returns the receiver, so that objects can be built by chaining "With" function invocations. // If called multiple times, the Destination field is set to the value of the last call. diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/providerloadbalancerparameters.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/providerloadbalancerparameters.go index 9949a56f6..a528ab3f7 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/providerloadbalancerparameters.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/providerloadbalancerparameters.go @@ -12,6 +12,7 @@ type ProviderLoadBalancerParametersApplyConfiguration struct { Type *v1.LoadBalancerProviderType `json:"type,omitempty"` AWS *AWSLoadBalancerParametersApplyConfiguration `json:"aws,omitempty"` GCP *GCPLoadBalancerParametersApplyConfiguration `json:"gcp,omitempty"` + IBM *IBMLoadBalancerParametersApplyConfiguration `json:"ibm,omitempty"` } // ProviderLoadBalancerParametersApplyConfiguration constructs an declarative configuration of the ProviderLoadBalancerParameters type for use with @@ -43,3 +44,11 @@ func (b *ProviderLoadBalancerParametersApplyConfiguration) WithGCP(value *GCPLoa b.GCP = value return b } + +// WithIBM sets the IBM field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IBM field is set to the value of the last call. 
+func (b *ProviderLoadBalancerParametersApplyConfiguration) WithIBM(value *IBMLoadBalancerParametersApplyConfiguration) *ProviderLoadBalancerParametersApplyConfiguration { + b.IBM = value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/resourceattributesaccessreview.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/resourceattributesaccessreview.go new file mode 100644 index 000000000..6c5d76077 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/resourceattributesaccessreview.go @@ -0,0 +1,40 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/authorization/v1" +) + +// ResourceAttributesAccessReviewApplyConfiguration represents an declarative configuration of the ResourceAttributesAccessReview type for use +// with apply. +type ResourceAttributesAccessReviewApplyConfiguration struct { + Required []v1.ResourceAttributes `json:"required,omitempty"` + Missing []v1.ResourceAttributes `json:"missing,omitempty"` +} + +// ResourceAttributesAccessReviewApplyConfiguration constructs an declarative configuration of the ResourceAttributesAccessReview type for use with +// apply. +func ResourceAttributesAccessReview() *ResourceAttributesAccessReviewApplyConfiguration { + return &ResourceAttributesAccessReviewApplyConfiguration{} +} + +// WithRequired adds the given value to the Required field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Required field. +func (b *ResourceAttributesAccessReviewApplyConfiguration) WithRequired(values ...v1.ResourceAttributes) *ResourceAttributesAccessReviewApplyConfiguration { + for i := range values { + b.Required = append(b.Required, values[i]) + } + return b +} + +// WithMissing adds the given value to the Missing field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Missing field. +func (b *ResourceAttributesAccessReviewApplyConfiguration) WithMissing(values ...v1.ResourceAttributes) *ResourceAttributesAccessReviewApplyConfiguration { + for i := range values { + b.Missing = append(b.Missing, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/serviceaccountissuerstatus.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/serviceaccountissuerstatus.go new file mode 100644 index 000000000..b305dc053 --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/serviceaccountissuerstatus.go @@ -0,0 +1,36 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ServiceAccountIssuerStatusApplyConfiguration represents an declarative configuration of the ServiceAccountIssuerStatus type for use +// with apply. +type ServiceAccountIssuerStatusApplyConfiguration struct { + Name *string `json:"name,omitempty"` + ExpirationTime *v1.Time `json:"expirationTime,omitempty"` +} + +// ServiceAccountIssuerStatusApplyConfiguration constructs an declarative configuration of the ServiceAccountIssuerStatus type for use with +// apply. 
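Aside: the ingress additions combine — `allowedSourceRanges` takes variadic `operatorv1.CIDR` values, and the new IBM member of `ProviderLoadBalancerParameters` is selected via the `type` discriminator. A hedged sketch; the range is hypothetical and the constant names `IBMLoadBalancerProvider` and `ProxyProtocol` are assumed from github.com/openshift/api:

	package main

	import (
		operatorv1 "github.com/openshift/api/operator/v1"
		applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
	)

	func main() {
		// The provider union mirrors the schema above: type discriminates
		// between the aws, gcp, and new ibm members.
		_ = applyoperatorv1.LoadBalancerStrategy().
			WithAllowedSourceRanges(operatorv1.CIDR("10.0.0.0/8")). // hypothetical range
			WithProviderParameters(applyoperatorv1.ProviderLoadBalancerParameters().
				WithType(operatorv1.IBMLoadBalancerProvider). // assumed constant
				WithIBM(applyoperatorv1.IBMLoadBalancerParameters().
					WithProtocol(operatorv1.ProxyProtocol))) // assumed constant
	}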
+func ServiceAccountIssuerStatus() *ServiceAccountIssuerStatusApplyConfiguration { + return &ServiceAccountIssuerStatusApplyConfiguration{} +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ServiceAccountIssuerStatusApplyConfiguration) WithName(value string) *ServiceAccountIssuerStatusApplyConfiguration { + b.Name = &value + return b +} + +// WithExpirationTime sets the ExpirationTime field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ExpirationTime field is set to the value of the last call. +func (b *ServiceAccountIssuerStatusApplyConfiguration) WithExpirationTime(value v1.Time) *ServiceAccountIssuerStatusApplyConfiguration { + b.ExpirationTime = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storagespec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storagespec.go index 226180e74..4e6deef2a 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storagespec.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/storagespec.go @@ -11,6 +11,7 @@ import ( // with apply. type StorageSpecApplyConfiguration struct { OperatorSpecApplyConfiguration `json:",inline"` + VSphereStorageDriver *operatorv1.StorageDriverType `json:"vsphereStorageDriver,omitempty"` } // StorageSpecApplyConfiguration constructs an declarative configuration of the StorageSpec type for use with @@ -58,3 +59,11 @@ func (b *StorageSpecApplyConfiguration) WithObservedConfig(value runtime.RawExte b.ObservedConfig = &value return b } + +// WithVSphereStorageDriver sets the VSphereStorageDriver field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the VSphereStorageDriver field is set to the value of the last call. +func (b *StorageSpecApplyConfiguration) WithVSphereStorageDriver(value operatorv1.StorageDriverType) *StorageSpecApplyConfiguration { + b.VSphereStorageDriver = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/upstreamresolvers.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/upstreamresolvers.go index eb2ea2e00..941149eaf 100644 --- a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/upstreamresolvers.go +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/upstreamresolvers.go @@ -9,9 +9,10 @@ import ( // UpstreamResolversApplyConfiguration represents an declarative configuration of the UpstreamResolvers type for use // with apply. 
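Aside: end to end, these builders feed the versioned clientset's server-side apply path. A hedged sketch under stated assumptions — that the generated clientset exposes an `Apply` method on `Storages()`, that the top-level `Storage(name)` apply configuration exists, and that `operatorv1.CSIWithMigrationDriver` is the constant name in github.com/openshift/api; the field-manager name is hypothetical:

	package main

	import (
		"context"

		operatorv1 "github.com/openshift/api/operator/v1"
		applyoperatorv1 "github.com/openshift/client-go/operator/applyconfigurations/operator/v1"
		operatorclient "github.com/openshift/client-go/operator/clientset/versioned"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/rest"
	)

	func applyStorage(ctx context.Context, cfg *rest.Config) error {
		client, err := operatorclient.NewForConfig(cfg)
		if err != nil {
			return err
		}
		// Build only the fields this manager owns; the apiserver merges
		// the rest according to the schema shipped in this patch.
		storage := applyoperatorv1.Storage("cluster").
			WithSpec(applyoperatorv1.StorageSpec().
				WithVSphereStorageDriver(operatorv1.CSIWithMigrationDriver)) // assumed constant
		_, err = client.OperatorV1().Storages().Apply(ctx, storage, metav1.ApplyOptions{
			FieldManager: "example-manager", // hypothetical manager name
			Force:        true,
		})
		return err
	}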
type UpstreamResolversApplyConfiguration struct { - Upstreams []UpstreamApplyConfiguration `json:"upstreams,omitempty"` - Policy *operatorv1.ForwardingPolicy `json:"policy,omitempty"` - TransportConfig *DNSTransportConfigApplyConfiguration `json:"transportConfig,omitempty"` + Upstreams []UpstreamApplyConfiguration `json:"upstreams,omitempty"` + Policy *operatorv1.ForwardingPolicy `json:"policy,omitempty"` + TransportConfig *DNSTransportConfigApplyConfiguration `json:"transportConfig,omitempty"` + ProtocolStrategy *operatorv1.ProtocolStrategy `json:"protocolStrategy,omitempty"` } // UpstreamResolversApplyConfiguration constructs an declarative configuration of the UpstreamResolvers type for use with @@ -48,3 +49,11 @@ func (b *UpstreamResolversApplyConfiguration) WithTransportConfig(value *DNSTran b.TransportConfig = value return b } + +// WithProtocolStrategy sets the ProtocolStrategy field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ProtocolStrategy field is set to the value of the last call. +func (b *UpstreamResolversApplyConfiguration) WithProtocolStrategy(value operatorv1.ProtocolStrategy) *UpstreamResolversApplyConfiguration { + b.ProtocolStrategy = &value + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/vspherecsidriverconfigspec.go b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/vspherecsidriverconfigspec.go new file mode 100644 index 000000000..027cd9dbf --- /dev/null +++ b/vendor/github.com/openshift/client-go/operator/applyconfigurations/operator/v1/vspherecsidriverconfigspec.go @@ -0,0 +1,25 @@ +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// VSphereCSIDriverConfigSpecApplyConfiguration represents an declarative configuration of the VSphereCSIDriverConfigSpec type for use +// with apply. +type VSphereCSIDriverConfigSpecApplyConfiguration struct { + TopologyCategories []string `json:"topologyCategories,omitempty"` +} + +// VSphereCSIDriverConfigSpecApplyConfiguration constructs an declarative configuration of the VSphereCSIDriverConfigSpec type for use with +// apply. +func VSphereCSIDriverConfigSpec() *VSphereCSIDriverConfigSpecApplyConfiguration { + return &VSphereCSIDriverConfigSpecApplyConfiguration{} +} + +// WithTopologyCategories adds the given value to the TopologyCategories field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the TopologyCategories field. +func (b *VSphereCSIDriverConfigSpecApplyConfiguration) WithTopologyCategories(values ...string) *VSphereCSIDriverConfigSpecApplyConfiguration { + for i := range values { + b.TopologyCategories = append(b.TopologyCategories, values[i]) + } + return b +} diff --git a/vendor/github.com/openshift/client-go/operator/clientset/versioned/scheme/register.go b/vendor/github.com/openshift/client-go/operator/clientset/versioned/scheme/register.go index 0a99d662e..04697bcea 100644 --- a/vendor/github.com/openshift/client-go/operator/clientset/versioned/scheme/register.go +++ b/vendor/github.com/openshift/client-go/operator/clientset/versioned/scheme/register.go @@ -23,14 +23,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. 
This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/vendor/github.com/openshift/library-go/LICENSE b/vendor/github.com/openshift/library-go/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/openshift/library-go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
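The files vendored below add library-go's generic controller factory. A minimal
usage sketch, assuming a client-go SharedInformer `podInformer` and an
events.Recorder `recorder` (both hypothetical names; only the factory API itself
comes from the vendored factory.go/interfaces.go below):

	controller := factory.New().
		WithInformers(podInformer). // any event observed by the informer triggers Sync()
		WithSync(func(ctx context.Context, syncCtx factory.SyncContext) error {
			klog.V(4).Infof("sync triggered for key %q", syncCtx.QueueKey())
			return nil // a non-nil error requeues the key (and sets Degraded if WithSyncDegradedOnError was used)
		}).
		ResyncEvery(5 * time.Minute). // periodic resync, independent of informer events
		ToController("ExampleController", recorder)
	go controller.Run(ctx, 1) // one worker; blocks until ctx is cancelled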
diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/base_controller.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/base_controller.go new file mode 100644 index 000000000..722d95d5e --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/base_controller.go @@ -0,0 +1,276 @@ +package factory + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/robfig/cron" + apierrors "k8s.io/apimachinery/pkg/api/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" + + operatorv1 "github.com/openshift/api/operator/v1" + "github.com/openshift/library-go/pkg/operator/management" + "github.com/openshift/library-go/pkg/operator/v1helpers" + operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +// SyntheticRequeueError can be returned from sync() in case of forcing a sync() retry artificially. +// This can be also done by re-adding the key to queue, but this is cheaper and more convenient. +var SyntheticRequeueError = errors.New("synthetic requeue request") + +var defaultCacheSyncTimeout = 10 * time.Minute + +// baseController represents generic Kubernetes controller boiler-plate +type baseController struct { + name string + cachesToSync []cache.InformerSynced + sync func(ctx context.Context, controllerContext SyncContext) error + syncContext SyncContext + syncDegradedClient operatorv1helpers.OperatorClient + resyncEvery time.Duration + resyncSchedules []cron.Schedule + postStartHooks []PostStartHook + cacheSyncTimeout time.Duration +} + +var _ Controller = &baseController{} + +func (c baseController) Name() string { + return c.name +} + +type scheduledJob struct { + queue workqueue.RateLimitingInterface + name string +} + +func newScheduledJob(name string, queue workqueue.RateLimitingInterface) cron.Job { + return &scheduledJob{ + queue: queue, + name: name, + } +} + +func (s *scheduledJob) Run() { + klog.V(4).Infof("Triggering scheduled %q controller run", s.name) + s.queue.Add(DefaultQueueKey) +} + +func waitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...cache.InformerSynced) error { + klog.Infof("Waiting for caches to sync for %s", controllerName) + + if !cache.WaitForCacheSync(stopCh, cacheSyncs...) { + return fmt.Errorf("unable to sync caches for %s", controllerName) + } + + klog.Infof("Caches are synced for %s ", controllerName) + + return nil +} + +func (c *baseController) Run(ctx context.Context, workers int) { + // HandleCrash recovers panics + defer utilruntime.HandleCrash(c.degradedPanicHandler) + + // give caches 10 minutes to sync + cacheSyncCtx, cacheSyncCancel := context.WithTimeout(ctx, c.cacheSyncTimeout) + defer cacheSyncCancel() + err := waitForNamedCacheSync(c.name, cacheSyncCtx.Done(), c.cachesToSync...) + if err != nil { + select { + case <-ctx.Done(): + // Exit gracefully because the controller was requested to stop. + return + default: + // If caches did not sync after 10 minutes, it has taken oddly long and + // we should provide feedback. Since the control loops will never start, + // it is safer to exit with a good message than to continue with a dead loop. + // TODO: Consider making this behavior configurable. 
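+			// klog.Exit logs the error and then terminates the process, so a
+			// wedged informer cannot leave the operator running with dead control loops.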
+ klog.Exit(err) + } + } + + var workerWg sync.WaitGroup + defer func() { + defer klog.Infof("All %s workers have been terminated", c.name) + workerWg.Wait() + }() + + // queueContext is used to track and initiate queue shutdown + queueContext, queueContextCancel := context.WithCancel(context.TODO()) + + for i := 1; i <= workers; i++ { + klog.Infof("Starting #%d worker of %s controller ...", i, c.name) + workerWg.Add(1) + go func() { + defer func() { + klog.Infof("Shutting down worker of %s controller ...", c.name) + workerWg.Done() + }() + c.runWorker(queueContext) + }() + } + + // if scheduled run is requested, run the cron scheduler + if c.resyncSchedules != nil { + scheduler := cron.New() + for _, s := range c.resyncSchedules { + scheduler.Schedule(s, newScheduledJob(c.name, c.syncContext.Queue())) + } + scheduler.Start() + defer scheduler.Stop() + } + + // runPeriodicalResync is independent from queue + if c.resyncEvery > 0 { + workerWg.Add(1) + if c.resyncEvery < 60*time.Second { + // Warn about too fast resyncs as they might drain the operators QPS. + // This event is cheap as it is only emitted on operator startup. + c.syncContext.Recorder().Warningf("FastControllerResync", "Controller %q resync interval is set to %s which might lead to client request throttling", c.name, c.resyncEvery) + } + go func() { + defer workerWg.Done() + wait.UntilWithContext(ctx, func(ctx context.Context) { c.syncContext.Queue().Add(DefaultQueueKey) }, c.resyncEvery) + }() + } + + // run post-start hooks (custom triggers, etc.) + if len(c.postStartHooks) > 0 { + var hookWg sync.WaitGroup + defer func() { + hookWg.Wait() // wait for the post-start hooks + klog.Infof("All %s post start hooks have been terminated", c.name) + }() + for i := range c.postStartHooks { + hookWg.Add(1) + go func(index int) { + defer hookWg.Done() + if err := c.postStartHooks[index](ctx, c.syncContext); err != nil { + klog.Warningf("%s controller post start hook error: %v", c.name, err) + } + }(i) + } + } + + // Handle controller shutdown + + <-ctx.Done() // wait for controller context to be cancelled + c.syncContext.Queue().ShutDown() // shutdown the controller queue first + queueContextCancel() // cancel the queue context, which tell workers to initiate shutdown + + // Wait for all workers to finish their job. + // at this point the Run() can hang and caller have to implement the logic that will kill + // this controller (SIGKILL). + klog.Infof("Shutting down %s ...", c.name) +} + +func (c *baseController) Sync(ctx context.Context, syncCtx SyncContext) error { + return c.sync(ctx, syncCtx) +} + +// runWorker runs a single worker +// The worker is asked to terminate when the passed context is cancelled and is given terminationGraceDuration time +// to complete its shutdown. +func (c *baseController) runWorker(queueCtx context.Context) { + wait.UntilWithContext( + queueCtx, + func(queueCtx context.Context) { + defer utilruntime.HandleCrash(c.degradedPanicHandler) + for { + select { + case <-queueCtx.Done(): + return + default: + c.processNextWorkItem(queueCtx) + } + } + }, + 1*time.Second) +} + +// reconcile wraps the sync() call and if operator client is set, it handle the degraded condition if sync() returns an error. +func (c *baseController) reconcile(ctx context.Context, syncCtx SyncContext) error { + err := c.sync(ctx, syncCtx) + degradedErr := c.reportDegraded(ctx, err) + if apierrors.IsNotFound(degradedErr) && management.IsOperatorRemovable() { + // The operator tolerates missing CR, therefore don't report it up. 
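+		// Return the original (possibly nil) sync error instead of the NotFound from
+		// the status update, so a missing operator CR is not treated as a sync failure.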
+		return err
+	}
+	return degradedErr
+}
+
+// degradedPanicHandler catches potential panics and converts them into a Degraded status condition.
+func (c *baseController) degradedPanicHandler(panicVal interface{}) {
+	if c.syncDegradedClient == nil {
+		// if we don't have a client for reporting degraded condition, then let the existing panic handler do the work
+		return
+	}
+	_ = c.reportDegraded(context.TODO(), fmt.Errorf("panic caught:\n%v", panicVal))
+}
+
+// reportDegraded updates status with an indication of degraded-ness
+func (c *baseController) reportDegraded(ctx context.Context, reportedError error) error {
+	if c.syncDegradedClient == nil {
+		return reportedError
+	}
+	if reportedError != nil {
+		_, _, updateErr := v1helpers.UpdateStatus(ctx, c.syncDegradedClient, v1helpers.UpdateConditionFn(operatorv1.OperatorCondition{
+			Type:    c.name + "Degraded",
+			Status:  operatorv1.ConditionTrue,
+			Reason:  "SyncError",
+			Message: reportedError.Error(),
+		}))
+		if updateErr != nil {
+			klog.Warningf("Updating status of %q failed: %v", c.Name(), updateErr)
+		}
+		return reportedError
+	}
+	_, _, updateErr := v1helpers.UpdateStatus(ctx, c.syncDegradedClient,
+		v1helpers.UpdateConditionFn(operatorv1.OperatorCondition{
+			Type:   c.name + "Degraded",
+			Status: operatorv1.ConditionFalse,
+			Reason: "AsExpected",
+		}))
+	return updateErr
+}
+
+func (c *baseController) processNextWorkItem(queueCtx context.Context) {
+	key, quit := c.syncContext.Queue().Get()
+	if quit {
+		return
+	}
+	defer c.syncContext.Queue().Done(key)
+
+	syncCtx := c.syncContext.(syncContext)
+	var ok bool
+	syncCtx.queueKey, ok = key.(string)
+	if !ok {
+		utilruntime.HandleError(fmt.Errorf("%q controller failed to process key %q (not a string)", c.name, key))
+		return
+	}
+
+	if err := c.reconcile(queueCtx, syncCtx); err != nil {
+		if err == SyntheticRequeueError {
+			// logging this helps detect wedged controllers with missing pre-requirements
+			klog.V(5).Infof("%q controller requested synthetic requeue with key %q", c.name, key)
+		} else {
+			if klog.V(4).Enabled() || key != "key" {
+				utilruntime.HandleError(fmt.Errorf("%q controller failed to sync %q, err: %w", c.name, key, err))
+			} else {
+				utilruntime.HandleError(fmt.Errorf("%s reconciliation failed: %w", c.name, err))
+			}
+		}
+		c.syncContext.Queue().AddRateLimited(key)
+		return
+	}
+
+	c.syncContext.Queue().Forget(key)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/controller_context.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/controller_context.go
new file mode 100644
index 000000000..3c585e40a
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/controller_context.go
@@ -0,0 +1,116 @@
+package factory
+
+import (
+	"fmt"
+	"strings"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/util/workqueue"
+
+	"github.com/openshift/library-go/pkg/operator/events"
+)
+
+// syncContext implements SyncContext and provides user access to the queue and the object that caused
+// the sync to be triggered.
+type syncContext struct {
+	eventRecorder events.Recorder
+	queue         workqueue.RateLimitingInterface
+	queueKey      string
+}
+
+var _ SyncContext = syncContext{}
+
+// NewSyncContext returns a new sync context.
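+// The returned context carries a named rate-limited work queue and an event
+// recorder suffixed with the lower-cased controller name.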
+func NewSyncContext(name string, recorder events.Recorder) SyncContext {
+	return syncContext{
+		queue:         workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name),
+		eventRecorder: recorder.WithComponentSuffix(strings.ToLower(name)),
+	}
+}
+
+func (c syncContext) Queue() workqueue.RateLimitingInterface {
+	return c.queue
+}
+
+func (c syncContext) QueueKey() string {
+	return c.queueKey
+}
+
+func (c syncContext) Recorder() events.Recorder {
+	return c.eventRecorder
+}
+
+// eventHandler provides the default event handler that is added to the informers passed to the controller factory.
+func (c syncContext) eventHandler(queueKeysFunc ObjectQueueKeysFunc, filter EventFilterFunc) cache.ResourceEventHandler {
+	resourceEventHandler := cache.ResourceEventHandlerFuncs{
+		AddFunc: func(obj interface{}) {
+			runtimeObj, ok := obj.(runtime.Object)
+			if !ok {
+				utilruntime.HandleError(fmt.Errorf("added object %+v is not runtime Object", obj))
+				return
+			}
+			c.enqueueKeys(queueKeysFunc(runtimeObj)...)
+		},
+		UpdateFunc: func(old, new interface{}) {
+			runtimeObj, ok := new.(runtime.Object)
+			if !ok {
+				utilruntime.HandleError(fmt.Errorf("updated object %+v is not runtime Object", new))
+				return
+			}
+			c.enqueueKeys(queueKeysFunc(runtimeObj)...)
+		},
+		DeleteFunc: func(obj interface{}) {
+			runtimeObj, ok := obj.(runtime.Object)
+			if !ok {
+				if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
+					c.enqueueKeys(queueKeysFunc(tombstone.Obj.(runtime.Object))...)
+
+					return
+				}
+				utilruntime.HandleError(fmt.Errorf("deleted object %+v is not runtime Object", obj))
+				return
+			}
+			c.enqueueKeys(queueKeysFunc(runtimeObj)...)
+		},
+	}
+	if filter == nil {
+		return resourceEventHandler
+	}
+	return cache.FilteringResourceEventHandler{
+		FilterFunc: filter,
+		Handler:    resourceEventHandler,
+	}
+}
+
+func (c syncContext) enqueueKeys(keys ...string) {
+	for _, qKey := range keys {
+		c.queue.Add(qKey)
+	}
+}
+
+// namespaceChecker returns a function which returns true if an input obj
+// (or its tombstone) is a namespace whose name matches one of the namespaces
+// that we are interested in
+func namespaceChecker(interestingNamespaces []string) func(obj interface{}) bool {
+	interestingNamespacesSet := sets.NewString(interestingNamespaces...)
+
+	return func(obj interface{}) bool {
+		ns, ok := obj.(*corev1.Namespace)
+		if ok {
+			return interestingNamespacesSet.Has(ns.Name)
+		}
+
+		// the object might be getting deleted
+		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
+		if ok {
+			if ns, ok := tombstone.Obj.(*corev1.Namespace); ok {
+				return interestingNamespacesSet.Has(ns.Name)
+			}
+		}
+		return false
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/eventfilters.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/eventfilters.go
new file mode 100644
index 000000000..b70da9548
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/eventfilters.go
@@ -0,0 +1,26 @@
+package factory
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+func ObjectNameToKey(obj runtime.Object) string {
+	metaObj, ok := obj.(metav1.ObjectMetaAccessor)
+	if !ok {
+		return ""
+	}
+	return metaObj.GetObjectMeta().GetName()
+}
+
+func NamesFilter(names ...string) EventFilterFunc {
+	nameSet := sets.NewString(names...)
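+	// The allowed names are captured once; the returned closure is evaluated for every informer event.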
+	return func(obj interface{}) bool {
+		metaObj, ok := obj.(metav1.ObjectMetaAccessor)
+		if !ok {
+			return false
+		}
+		return nameSet.Has(metaObj.GetObjectMeta().GetName())
+	}
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/factory.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/factory.go
new file mode 100644
index 000000000..8f910f672
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/factory.go
@@ -0,0 +1,309 @@
+package factory
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/robfig/cron"
+	"k8s.io/apimachinery/pkg/runtime"
+	errorutil "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/client-go/tools/cache"
+
+	"github.com/openshift/library-go/pkg/operator/events"
+	operatorv1helpers "github.com/openshift/library-go/pkg/operator/v1helpers"
+)
+
+// DefaultQueueKey is the queue key used for string trigger based controllers.
+const DefaultQueueKey = "key"
+
+// DefaultQueueKeysFunc returns a slice with a single element - the DefaultQueueKey
+func DefaultQueueKeysFunc(_ runtime.Object) []string {
+	return []string{DefaultQueueKey}
+}
+
+// Factory is a generator that produces standard Kubernetes controllers.
+// Factory is generic and should only be used for simple controllers that do not require special handling.
+type Factory struct {
+	sync                  SyncFunc
+	syncContext           SyncContext
+	syncDegradedClient    operatorv1helpers.OperatorClient
+	resyncInterval        time.Duration
+	resyncSchedules       []string
+	informers             []filteredInformers
+	informerQueueKeys     []informersWithQueueKey
+	bareInformers         []Informer
+	postStartHooks        []PostStartHook
+	namespaceInformers    []*namespaceInformer
+	cachesToSync          []cache.InformerSynced
+	interestingNamespaces sets.String
+}
+
+// Informer represents any structure that allows registering event handlers and reports whether its caches have synced.
+// Any SharedInformer will comply.
+type Informer interface {
+	AddEventHandler(handler cache.ResourceEventHandler) (cache.ResourceEventHandlerRegistration, error)
+	HasSynced() bool
+}
+
+type namespaceInformer struct {
+	informer Informer
+	nsFilter EventFilterFunc
+}
+
+type informersWithQueueKey struct {
+	informers  []Informer
+	filter     EventFilterFunc
+	queueKeyFn ObjectQueueKeysFunc
+}
+
+type filteredInformers struct {
+	informers []Informer
+	filter    EventFilterFunc
+}
+
+// PostStartHook specifies a function that will run after the controller is started.
+// The context is cancelled when the controller is asked to shut down, and the post start hook should terminate as well.
+// The syncContext allows access to the controller queue and event recorder.
+type PostStartHook func(ctx context.Context, syncContext SyncContext) error
+
+// ObjectQueueKeyFunc is used to make a string work queue key out of the runtime object that is passed to it.
+// This can extract the "namespace/name" if you need it, or just return "key" if you are building a controller that only uses string
+// triggers.
+// DEPRECATED: use ObjectQueueKeysFunc instead
+type ObjectQueueKeyFunc func(runtime.Object) string
+
+// ObjectQueueKeysFunc is used to make string work queue keys out of the runtime object that is passed to it.
+// This can extract the "namespace/name" if you need it, or just return "key" if you are building a controller that only uses string
+// triggers.
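+//
+// A minimal illustrative implementation (not part of this package) that queues
+// namespaced objects under their "namespace/name" key, using client-go's
+// cache.MetaNamespaceKeyFunc, could look like:
+//
+//	func namespaceNameKeys(obj runtime.Object) []string {
+//		key, err := cache.MetaNamespaceKeyFunc(obj)
+//		if err != nil {
+//			return nil
+//		}
+//		return []string{key}
+//	}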
+type ObjectQueueKeysFunc func(runtime.Object) []string + +// EventFilterFunc is used to filter informer events to prevent Sync() from being called +type EventFilterFunc func(obj interface{}) bool + +// New return new factory instance. +func New() *Factory { + return &Factory{} +} + +// Sync is used to set the controller synchronization function. This function is the core of the controller and is +// usually hold the main controller logic. +func (f *Factory) WithSync(syncFn SyncFunc) *Factory { + f.sync = syncFn + return f +} + +// WithInformers is used to register event handlers and get the caches synchronized functions. +// Pass informers you want to use to react to changes on resources. If informer event is observed, then the Sync() function +// is called. +func (f *Factory) WithInformers(informers ...Informer) *Factory { + f.WithFilteredEventsInformers(nil, informers...) + return f +} + +// WithFilteredEventsInformers is used to register event handlers and get the caches synchronized functions. +// Pass the informers you want to use to react to changes on resources. If informer event is observed, then the Sync() function +// is called. +// Pass filter to filter out events that should not trigger Sync() call. +func (f *Factory) WithFilteredEventsInformers(filter EventFilterFunc, informers ...Informer) *Factory { + f.informers = append(f.informers, filteredInformers{ + informers: informers, + filter: filter, + }) + return f +} + +// WithBareInformers allow to register informer that already has custom event handlers registered and no additional +// event handlers will be added to this informer. +// The controller will wait for the cache of this informer to be synced. +// The existing event handlers will have to respect the queue key function or the sync() implementation will have to +// count with custom queue keys. +func (f *Factory) WithBareInformers(informers ...Informer) *Factory { + f.bareInformers = append(f.bareInformers, informers...) + return f +} + +// WithInformersQueueKeyFunc is used to register event handlers and get the caches synchronized functions. +// Pass informers you want to use to react to changes on resources. If informer event is observed, then the Sync() function +// is called. +// Pass the queueKeyFn you want to use to transform the informer runtime.Object into string key used by work queue. +func (f *Factory) WithInformersQueueKeyFunc(queueKeyFn ObjectQueueKeyFunc, informers ...Informer) *Factory { + f.informerQueueKeys = append(f.informerQueueKeys, informersWithQueueKey{ + informers: informers, + queueKeyFn: func(o runtime.Object) []string { + return []string{queueKeyFn(o)} + }, + }) + return f +} + +// WithFilteredEventsInformersQueueKeyFunc is used to register event handlers and get the caches synchronized functions. +// Pass informers you want to use to react to changes on resources. If informer event is observed, then the Sync() function +// is called. +// Pass the queueKeyFn you want to use to transform the informer runtime.Object into string key used by work queue. +// Pass filter to filter out events that should not trigger Sync() call. 
+func (f *Factory) WithFilteredEventsInformersQueueKeyFunc(queueKeyFn ObjectQueueKeyFunc, filter EventFilterFunc, informers ...Informer) *Factory { + f.informerQueueKeys = append(f.informerQueueKeys, informersWithQueueKey{ + informers: informers, + filter: filter, + queueKeyFn: func(o runtime.Object) []string { + return []string{queueKeyFn(o)} + }, + }) + return f +} + +// WithInformersQueueKeysFunc is used to register event handlers and get the caches synchronized functions. +// Pass informers you want to use to react to changes on resources. If informer event is observed, then the Sync() function +// is called. +// Pass the queueKeyFn you want to use to transform the informer runtime.Object into string key used by work queue. +func (f *Factory) WithInformersQueueKeysFunc(queueKeyFn ObjectQueueKeysFunc, informers ...Informer) *Factory { + f.informerQueueKeys = append(f.informerQueueKeys, informersWithQueueKey{ + informers: informers, + queueKeyFn: queueKeyFn, + }) + return f +} + +// WithFilteredEventsInformersQueueKeysFunc is used to register event handlers and get the caches synchronized functions. +// Pass informers you want to use to react to changes on resources. If informer event is observed, then the Sync() function +// is called. +// Pass the queueKeyFn you want to use to transform the informer runtime.Object into string key used by work queue. +// Pass filter to filter out events that should not trigger Sync() call. +func (f *Factory) WithFilteredEventsInformersQueueKeysFunc(queueKeyFn ObjectQueueKeysFunc, filter EventFilterFunc, informers ...Informer) *Factory { + f.informerQueueKeys = append(f.informerQueueKeys, informersWithQueueKey{ + informers: informers, + filter: filter, + queueKeyFn: queueKeyFn, + }) + return f +} + +// WithPostStartHooks allows to register functions that will run asynchronously after the controller is started via Run command. +func (f *Factory) WithPostStartHooks(hooks ...PostStartHook) *Factory { + f.postStartHooks = append(f.postStartHooks, hooks...) + return f +} + +// WithNamespaceInformer is used to register event handlers and get the caches synchronized functions. +// The sync function will only trigger when the object observed by this informer is a namespace and its name matches the interestingNamespaces. +// Do not use this to register non-namespace informers. +func (f *Factory) WithNamespaceInformer(informer Informer, interestingNamespaces ...string) *Factory { + f.namespaceInformers = append(f.namespaceInformers, &namespaceInformer{ + informer: informer, + nsFilter: namespaceChecker(interestingNamespaces), + }) + return f +} + +// ResyncEvery will cause the Sync() function to be called periodically, regardless of informers. +// This is useful when you want to refresh every N minutes or you fear that your informers can be stucked. +// If this is not called, no periodical resync will happen. +// Note: The controller context passed to Sync() function in this case does not contain the object metadata or object itself. +// +// This can be used to detect periodical resyncs, but normal Sync() have to be cautious about `nil` objects. +func (f *Factory) ResyncEvery(interval time.Duration) *Factory { + f.resyncInterval = interval + return f +} + +// ResyncSchedule allows to supply a Cron syntax schedule that will be used to schedule the sync() call runs. +// This allows more fine-tuned controller scheduling than ResyncEvery. 
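+// Schedules are parsed with cron.ParseStandard from github.com/robfig/cron, so
+// standard five-field cron expressions and descriptors such as "@hourly" are accepted.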
+// Examples: +// +// factory.New().ResyncSchedule("@every 1s").ToController() // Every second +// factory.New().ResyncSchedule("@hourly").ToController() // Every hour +// factory.New().ResyncSchedule("30 * * * *").ToController() // Every hour on the half hour +// +// Note: The controller context passed to Sync() function in this case does not contain the object metadata or object itself. +// +// This can be used to detect periodical resyncs, but normal Sync() have to be cautious about `nil` objects. +func (f *Factory) ResyncSchedule(schedules ...string) *Factory { + f.resyncSchedules = append(f.resyncSchedules, schedules...) + return f +} + +// WithSyncContext allows to specify custom, existing sync context for this factory. +// This is useful during unit testing where you can override the default event recorder or mock the runtime objects. +// If this function not called, a SyncContext is created by the factory automatically. +func (f *Factory) WithSyncContext(ctx SyncContext) *Factory { + f.syncContext = ctx + return f +} + +// WithSyncDegradedOnError encapsulate the controller sync() function, so when this function return an error, the operator client +// is used to set the degraded condition to (eg. "ControllerFooDegraded"). The degraded condition name is set based on the controller name. +func (f *Factory) WithSyncDegradedOnError(operatorClient operatorv1helpers.OperatorClient) *Factory { + f.syncDegradedClient = operatorClient + return f +} + +// Controller produce a runnable controller. +func (f *Factory) ToController(name string, eventRecorder events.Recorder) Controller { + if f.sync == nil { + panic(fmt.Errorf("WithSync() must be used before calling ToController() in %q", name)) + } + + var ctx SyncContext + if f.syncContext != nil { + ctx = f.syncContext + } else { + ctx = NewSyncContext(name, eventRecorder) + } + + var cronSchedules []cron.Schedule + if len(f.resyncSchedules) > 0 { + var errors []error + for _, schedule := range f.resyncSchedules { + if s, err := cron.ParseStandard(schedule); err != nil { + errors = append(errors, err) + } else { + cronSchedules = append(cronSchedules, s) + } + } + if err := errorutil.NewAggregate(errors); err != nil { + panic(fmt.Errorf("failed to parse controller schedules for %q: %v", name, err)) + } + } + + c := &baseController{ + name: name, + syncDegradedClient: f.syncDegradedClient, + sync: f.sync, + resyncEvery: f.resyncInterval, + resyncSchedules: cronSchedules, + cachesToSync: append([]cache.InformerSynced{}, f.cachesToSync...), + syncContext: ctx, + postStartHooks: f.postStartHooks, + cacheSyncTimeout: defaultCacheSyncTimeout, + } + + for i := range f.informerQueueKeys { + for d := range f.informerQueueKeys[i].informers { + informer := f.informerQueueKeys[i].informers[d] + queueKeyFn := f.informerQueueKeys[i].queueKeyFn + informer.AddEventHandler(c.syncContext.(syncContext).eventHandler(queueKeyFn, f.informerQueueKeys[i].filter)) + c.cachesToSync = append(c.cachesToSync, informer.HasSynced) + } + } + + for i := range f.informers { + for d := range f.informers[i].informers { + informer := f.informers[i].informers[d] + informer.AddEventHandler(c.syncContext.(syncContext).eventHandler(DefaultQueueKeysFunc, f.informers[i].filter)) + c.cachesToSync = append(c.cachesToSync, informer.HasSynced) + } + } + + for i := range f.bareInformers { + c.cachesToSync = append(c.cachesToSync, f.bareInformers[i].HasSynced) + } + + for i := range f.namespaceInformers { + 
f.namespaceInformers[i].informer.AddEventHandler(c.syncContext.(syncContext).eventHandler(DefaultQueueKeysFunc, f.namespaceInformers[i].nsFilter))
+		c.cachesToSync = append(c.cachesToSync, f.namespaceInformers[i].informer.HasSynced)
+	}
+
+	return c
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/controller/factory/interfaces.go b/vendor/github.com/openshift/library-go/pkg/controller/factory/interfaces.go
new file mode 100644
index 000000000..0ef98c670
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/controller/factory/interfaces.go
@@ -0,0 +1,47 @@
+package factory
+
+import (
+	"context"
+
+	"k8s.io/client-go/util/workqueue"
+
+	"github.com/openshift/library-go/pkg/operator/events"
+)
+
+// Controller interface represents a runnable Kubernetes controller.
+// Cancelling the context passed to Run() will cause the controller to shut down.
+// The number of workers determines how parallel the job processing should be.
+type Controller interface {
+	// Run runs the controller and blocks until the controller is finished.
+	// The number of workers can be specified via the workers parameter.
+	// This function will return when all internal loops are finished.
+	// Note that having more than one worker usually means parallel invocations of Sync().
+	Run(ctx context.Context, workers int)
+
+	// Sync contains the main controller logic.
+	// This should not be called directly, but can be used in unit tests to exercise the sync.
+	Sync(ctx context.Context, controllerContext SyncContext) error
+
+	// Name returns the controller name string.
+	Name() string
+}
+
+// SyncContext interface represents a context given to the Sync() function where the main controller logic happens.
+// SyncContext exposes the controller name and gives the user access to the queue (for manual requeue).
+// SyncContext also provides metadata about the object that the informers observed as changed.
+type SyncContext interface {
+	// Queue gives access to the controller queue. This can be used for manual requeue, although if a Sync() function returns
+	// an error, the object is automatically re-queued. Use with caution.
+	Queue() workqueue.RateLimitingInterface
+
+	// QueueKey represents the queue key passed to the Sync function.
+	QueueKey() string
+
+	// Recorder provides access to the event recorder.
+	Recorder() events.Recorder
+}
+
+// SyncFunc is a function that contains the main controller logic.
+// The ctx passed is the main controller context; when it is cancelled, the controller is being shut down.
+// The syncContext provides access to the controller name, queue and event recorder.
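+// Returning SyntheticRequeueError from a SyncFunc forces a rate-limited requeue
+// of the key without the error being reported as a sync failure.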
+type SyncFunc func(ctx context.Context, controllerContext SyncContext) error diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/OWNERS b/vendor/github.com/openshift/library-go/pkg/crypto/OWNERS new file mode 100644 index 000000000..4d4ce5ab9 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/crypto/OWNERS @@ -0,0 +1,4 @@ +reviewers: + - stlaz +approvers: + - stlaz diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go new file mode 100644 index 000000000..554112c49 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/crypto/crypto.go @@ -0,0 +1,1252 @@ +package crypto + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" + mathrand "math/rand" + "net" + "os" + "path/filepath" + "reflect" + "sort" + "strconv" + "sync" + "time" + + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/client-go/util/cert" +) + +// TLS versions that are known to golang. Go 1.13 adds support for +// TLS 1.3 that's opt-out with a build flag. +var versions = map[string]uint16{ + "VersionTLS10": tls.VersionTLS10, + "VersionTLS11": tls.VersionTLS11, + "VersionTLS12": tls.VersionTLS12, + "VersionTLS13": tls.VersionTLS13, +} + +// TLS versions that are enabled. +var supportedVersions = map[string]uint16{ + "VersionTLS10": tls.VersionTLS10, + "VersionTLS11": tls.VersionTLS11, + "VersionTLS12": tls.VersionTLS12, + "VersionTLS13": tls.VersionTLS13, +} + +// TLSVersionToNameOrDie given a tls version as an int, return its readable name +func TLSVersionToNameOrDie(intVal uint16) string { + matches := []string{} + for key, version := range versions { + if version == intVal { + matches = append(matches, key) + } + } + + if len(matches) == 0 { + panic(fmt.Sprintf("no name found for %d", intVal)) + } + if len(matches) > 1 { + panic(fmt.Sprintf("multiple names found for %d: %v", intVal, matches)) + } + return matches[0] +} + +func TLSVersion(versionName string) (uint16, error) { + if len(versionName) == 0 { + return DefaultTLSVersion(), nil + } + if version, ok := versions[versionName]; ok { + return version, nil + } + return 0, fmt.Errorf("unknown tls version %q", versionName) +} +func TLSVersionOrDie(versionName string) uint16 { + version, err := TLSVersion(versionName) + if err != nil { + panic(err) + } + return version +} + +// TLS versions that are known to golang, but may not necessarily be enabled. +func GolangTLSVersions() []string { + supported := []string{} + for k := range versions { + supported = append(supported, k) + } + sort.Strings(supported) + return supported +} + +// Returns the build enabled TLS versions. 
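+// Unlike GolangTLSVersions, this reflects only the supportedVersions map rather
+// than every version known to the runtime.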
+func ValidTLSVersions() []string { + validVersions := []string{} + for k := range supportedVersions { + validVersions = append(validVersions, k) + } + sort.Strings(validVersions) + return validVersions +} +func DefaultTLSVersion() uint16 { + // Can't use SSLv3 because of POODLE and BEAST + // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher + // Can't use TLSv1.1 because of RC4 cipher usage + return tls.VersionTLS12 +} + +// ciphersTLS13 copies golang 1.13 implementation, where TLS1.3 suites are not +// configurable (cipherSuites field is ignored for TLS1.3 flows and all of the +// below three - and none other - are used) +var ciphersTLS13 = map[string]uint16{ + "TLS_AES_128_GCM_SHA256": tls.TLS_AES_128_GCM_SHA256, + "TLS_AES_256_GCM_SHA384": tls.TLS_AES_256_GCM_SHA384, + "TLS_CHACHA20_POLY1305_SHA256": tls.TLS_CHACHA20_POLY1305_SHA256, +} + +var ciphers = map[string]uint16{ + "TLS_RSA_WITH_RC4_128_SHA": tls.TLS_RSA_WITH_RC4_128_SHA, + "TLS_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA": tls.TLS_RSA_WITH_AES_128_CBC_SHA, + "TLS_RSA_WITH_AES_256_CBC_SHA": tls.TLS_RSA_WITH_AES_256_CBC_SHA, + "TLS_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_RSA_WITH_AES_128_CBC_SHA256, + "TLS_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_RSA_WITH_AES_128_GCM_SHA256, + "TLS_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_RSA_WITH_RC4_128_SHA": tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, + "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA": tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256": tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384": tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256": tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, + "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256": tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, +} + +// openSSLToIANACiphersMap maps OpenSSL cipher suite names to IANA names +// ref: https://www.iana.org/assignments/tls-parameters/tls-parameters.xml +var openSSLToIANACiphersMap = map[string]string{ + // TLS 1.3 ciphers - not configurable in go 1.13, all of them are used in TLSv1.3 flows + // "TLS_AES_128_GCM_SHA256": "TLS_AES_128_GCM_SHA256", // 0x13,0x01 + // "TLS_AES_256_GCM_SHA384": "TLS_AES_256_GCM_SHA384", // 0x13,0x02 + // "TLS_CHACHA20_POLY1305_SHA256": "TLS_CHACHA20_POLY1305_SHA256", // 0x13,0x03 + + // TLS 1.2 + "ECDHE-ECDSA-AES128-GCM-SHA256": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", // 0xC0,0x2B + "ECDHE-RSA-AES128-GCM-SHA256": 
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", // 0xC0,0x2F + "ECDHE-ECDSA-AES256-GCM-SHA384": "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", // 0xC0,0x2C + "ECDHE-RSA-AES256-GCM-SHA384": "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", // 0xC0,0x30 + "ECDHE-ECDSA-CHACHA20-POLY1305": "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", // 0xCC,0xA9 + "ECDHE-RSA-CHACHA20-POLY1305": "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", // 0xCC,0xA8 + "ECDHE-ECDSA-AES128-SHA256": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", // 0xC0,0x23 + "ECDHE-RSA-AES128-SHA256": "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", // 0xC0,0x27 + "AES128-GCM-SHA256": "TLS_RSA_WITH_AES_128_GCM_SHA256", // 0x00,0x9C + "AES256-GCM-SHA384": "TLS_RSA_WITH_AES_256_GCM_SHA384", // 0x00,0x9D + "AES128-SHA256": "TLS_RSA_WITH_AES_128_CBC_SHA256", // 0x00,0x3C + + // TLS 1 + "ECDHE-ECDSA-AES128-SHA": "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", // 0xC0,0x09 + "ECDHE-RSA-AES128-SHA": "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", // 0xC0,0x13 + "ECDHE-ECDSA-AES256-SHA": "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", // 0xC0,0x0A + "ECDHE-RSA-AES256-SHA": "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", // 0xC0,0x14 + + // SSL 3 + "AES128-SHA": "TLS_RSA_WITH_AES_128_CBC_SHA", // 0x00,0x2F + "AES256-SHA": "TLS_RSA_WITH_AES_256_CBC_SHA", // 0x00,0x35 + "DES-CBC3-SHA": "TLS_RSA_WITH_3DES_EDE_CBC_SHA", // 0x00,0x0A +} + +// CipherSuitesToNamesOrDie given a list of cipher suites as ints, return their readable names +func CipherSuitesToNamesOrDie(intVals []uint16) []string { + ret := []string{} + for _, intVal := range intVals { + ret = append(ret, CipherSuiteToNameOrDie(intVal)) + } + + return ret +} + +// CipherSuiteToNameOrDie given a cipher suite as an int, return its readable name +func CipherSuiteToNameOrDie(intVal uint16) string { + // The following suite ids appear twice in the cipher map (with + // and without the _SHA256 suffix) for the purposes of backwards + // compatibility. Always return the current rather than the legacy + // name. 
+ switch intVal { + case tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256: + return "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256" + case tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256: + return "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256" + } + + matches := []string{} + for key, version := range ciphers { + if version == intVal { + matches = append(matches, key) + } + } + + if len(matches) == 0 { + panic(fmt.Sprintf("no name found for %d", intVal)) + } + if len(matches) > 1 { + panic(fmt.Sprintf("multiple names found for %d: %v", intVal, matches)) + } + return matches[0] +} + +func CipherSuite(cipherName string) (uint16, error) { + if cipher, ok := ciphers[cipherName]; ok { + return cipher, nil + } + + if _, ok := ciphersTLS13[cipherName]; ok { + return 0, fmt.Errorf("all golang TLSv1.3 ciphers are always used for TLSv1.3 flows") + } + + return 0, fmt.Errorf("unknown cipher name %q", cipherName) +} + +func CipherSuitesOrDie(cipherNames []string) []uint16 { + if len(cipherNames) == 0 { + return DefaultCiphers() + } + cipherValues := []uint16{} + for _, cipherName := range cipherNames { + cipher, err := CipherSuite(cipherName) + if err != nil { + panic(err) + } + cipherValues = append(cipherValues, cipher) + } + return cipherValues +} +func ValidCipherSuites() []string { + validCipherSuites := []string{} + for k := range ciphers { + validCipherSuites = append(validCipherSuites, k) + } + sort.Strings(validCipherSuites) + return validCipherSuites +} +func DefaultCiphers() []uint16 { + // HTTP/2 mandates TLS 1.2 or higher with an AEAD cipher + // suite (GCM, Poly1305) and ephemeral key exchange (ECDHE, DHE) for + // perfect forward secrecy. Servers may provide additional cipher + // suites for backwards compatibility with HTTP/1.1 clients. + // See RFC7540, section 9.2 (Use of TLS Features) and Appendix A + // (TLS 1.2 Cipher Suite Black List). + return []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // required by http/2 + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, // forbidden by http/2, not flagged by http2isBadCipher() in go1.8 + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, // forbidden by http/2, not flagged by http2isBadCipher() in go1.8 + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, // forbidden by http/2 + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, // forbidden by http/2 + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, // forbidden by http/2 + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, // forbidden by http/2 + tls.TLS_RSA_WITH_AES_128_GCM_SHA256, // forbidden by http/2 + tls.TLS_RSA_WITH_AES_256_GCM_SHA384, // forbidden by http/2 + // the next one is in the intermediate suite, but go1.8 http2isBadCipher() complains when it is included at the recommended index + // because it comes after ciphers forbidden by the http/2 spec + // tls.TLS_RSA_WITH_AES_128_CBC_SHA256, + // tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, // forbidden by http/2, disabled to mitigate SWEET32 attack + // tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, // forbidden by http/2, disabled to mitigate SWEET32 attack + tls.TLS_RSA_WITH_AES_128_CBC_SHA, // forbidden by http/2 + tls.TLS_RSA_WITH_AES_256_CBC_SHA, // forbidden by http/2 + } +} + +// SecureTLSConfig enforces the default minimum security settings for the cluster. 
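+// MinVersion and CipherSuites are filled in only when the caller left them unset.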
+func SecureTLSConfig(config *tls.Config) *tls.Config { + if config.MinVersion == 0 { + config.MinVersion = DefaultTLSVersion() + } + + config.PreferServerCipherSuites = true + if len(config.CipherSuites) == 0 { + config.CipherSuites = DefaultCiphers() + } + return config +} + +// OpenSSLToIANACipherSuites maps input OpenSSL Cipher Suite names to their +// IANA counterparts. +// Unknown ciphers are left out. +func OpenSSLToIANACipherSuites(ciphers []string) []string { + ianaCiphers := make([]string, 0, len(ciphers)) + + for _, c := range ciphers { + ianaCipher, found := openSSLToIANACiphersMap[c] + if found { + ianaCiphers = append(ianaCiphers, ianaCipher) + } + } + + return ianaCiphers +} + +type TLSCertificateConfig struct { + Certs []*x509.Certificate + Key crypto.PrivateKey +} + +type TLSCARoots struct { + Roots []*x509.Certificate +} + +func (c *TLSCertificateConfig) WriteCertConfigFile(certFile, keyFile string) error { + // ensure parent dir + if err := os.MkdirAll(filepath.Dir(certFile), os.FileMode(0755)); err != nil { + return err + } + certFileWriter, err := os.OpenFile(certFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + if err := os.MkdirAll(filepath.Dir(keyFile), os.FileMode(0755)); err != nil { + return err + } + keyFileWriter, err := os.OpenFile(keyFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + + if err := writeCertificates(certFileWriter, c.Certs...); err != nil { + return err + } + if err := writeKeyFile(keyFileWriter, c.Key); err != nil { + return err + } + + if err := certFileWriter.Close(); err != nil { + return err + } + if err := keyFileWriter.Close(); err != nil { + return err + } + + return nil +} + +func (c *TLSCertificateConfig) WriteCertConfig(certFile, keyFile io.Writer) error { + if err := writeCertificates(certFile, c.Certs...); err != nil { + return err + } + if err := writeKeyFile(keyFile, c.Key); err != nil { + return err + } + return nil +} + +func (c *TLSCertificateConfig) GetPEMBytes() ([]byte, []byte, error) { + certBytes, err := EncodeCertificates(c.Certs...) 
+ if err != nil { + return nil, nil, err + } + keyBytes, err := encodeKey(c.Key) + if err != nil { + return nil, nil, err + } + + return certBytes, keyBytes, nil +} + +func GetTLSCertificateConfig(certFile, keyFile string) (*TLSCertificateConfig, error) { + if len(certFile) == 0 { + return nil, errors.New("certFile missing") + } + if len(keyFile) == 0 { + return nil, errors.New("keyFile missing") + } + + certPEMBlock, err := os.ReadFile(certFile) + if err != nil { + return nil, err + } + certs, err := cert.ParseCertsPEM(certPEMBlock) + if err != nil { + return nil, fmt.Errorf("Error reading %s: %s", certFile, err) + } + + keyPEMBlock, err := os.ReadFile(keyFile) + if err != nil { + return nil, err + } + keyPairCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) + if err != nil { + return nil, err + } + key := keyPairCert.PrivateKey + + return &TLSCertificateConfig{certs, key}, nil +} + +func GetTLSCertificateConfigFromBytes(certBytes, keyBytes []byte) (*TLSCertificateConfig, error) { + if len(certBytes) == 0 { + return nil, errors.New("certFile missing") + } + if len(keyBytes) == 0 { + return nil, errors.New("keyFile missing") + } + + certs, err := cert.ParseCertsPEM(certBytes) + if err != nil { + return nil, fmt.Errorf("Error reading cert: %s", err) + } + + keyPairCert, err := tls.X509KeyPair(certBytes, keyBytes) + if err != nil { + return nil, err + } + key := keyPairCert.PrivateKey + + return &TLSCertificateConfig{certs, key}, nil +} + +const ( + DefaultCertificateLifetimeInDays = 365 * 2 // 2 years + DefaultCACertificateLifetimeInDays = 365 * 5 // 5 years + + // Default keys are 2048 bits + keyBits = 2048 +) + +type CA struct { + Config *TLSCertificateConfig + + SerialGenerator SerialGenerator +} + +// SerialGenerator is an interface for getting a serial number for the cert. It MUST be thread-safe. +type SerialGenerator interface { + Next(template *x509.Certificate) (int64, error) +} + +// SerialFileGenerator returns a unique, monotonically increasing serial number and ensures the CA on disk records that value. +type SerialFileGenerator struct { + SerialFile string + + // lock guards access to the Serial field + lock sync.Mutex + Serial int64 +} + +func NewSerialFileGenerator(serialFile string) (*SerialFileGenerator, error) { + // read serial file, it must already exist + serial, err := fileToSerial(serialFile) + if err != nil { + return nil, err + } + + generator := &SerialFileGenerator{ + Serial: serial, + SerialFile: serialFile, + } + + // 0 is unused and 1 is reserved for the CA itself + // Thus we need to guarantee that the first external call to SerialFileGenerator.Next returns 2+ + // meaning that SerialFileGenerator.Serial must not be less than 1 (it is guaranteed to be non-negative) + if generator.Serial < 1 { + // fake a call to Next so the file stays in sync and Serial is incremented + if _, err := generator.Next(&x509.Certificate{}); err != nil { + return nil, err + } + } + + return generator, nil +} + +// Next returns a unique, monotonically increasing serial number and ensures the CA on disk records that value. 
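+// The internal mutex serializes concurrent callers, and the serial file is
+// re-read on every call to detect concurrent external writers.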
+func (s *SerialFileGenerator) Next(template *x509.Certificate) (int64, error) { + s.lock.Lock() + defer s.lock.Unlock() + + // do a best effort check to make sure concurrent external writes are not occurring to the underlying serial file + serial, err := fileToSerial(s.SerialFile) + if err != nil { + return 0, err + } + if serial != s.Serial { + return 0, fmt.Errorf("serial file %s out of sync ram=%d disk=%d", s.SerialFile, s.Serial, serial) + } + + next := s.Serial + 1 + s.Serial = next + + // Output in hex, padded to multiples of two characters for OpenSSL's sake + serialText := fmt.Sprintf("%X", next) + if len(serialText)%2 == 1 { + serialText = "0" + serialText + } + // always add a newline at the end to have a valid file + serialText += "\n" + + if err := os.WriteFile(s.SerialFile, []byte(serialText), os.FileMode(0640)); err != nil { + return 0, err + } + return next, nil +} + +func fileToSerial(serialFile string) (int64, error) { + serialData, err := os.ReadFile(serialFile) + if err != nil { + return 0, err + } + + // read the file as a single hex number after stripping any whitespace + serial, err := strconv.ParseInt(string(bytes.TrimSpace(serialData)), 16, 64) + if err != nil { + return 0, err + } + + if serial < 0 { + return 0, fmt.Errorf("invalid negative serial %d in serial file %s", serial, serialFile) + } + + return serial, nil +} + +// RandomSerialGenerator returns a serial based on time.Now and the subject +type RandomSerialGenerator struct { +} + +func (s *RandomSerialGenerator) Next(template *x509.Certificate) (int64, error) { + return randomSerialNumber(), nil +} + +// randomSerialNumber returns a random int64 serial number based on +// time.Now. It is defined separately from the generator interface so +// that the caller doesn't have to worry about an input template or +// error - these are unnecessary when creating a random serial. 
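+// Note that math/rand seeded with the current time is not a cryptographically
+// secure source; the serial is meant to be unique, not unpredictable.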
+func randomSerialNumber() int64 { + r := mathrand.New(mathrand.NewSource(time.Now().UTC().UnixNano())) + return r.Int63() +} + +// EnsureCA returns a CA, whether it was created (as opposed to pre-existing), and any error +// if serialFile is empty, a RandomSerialGenerator will be used +func EnsureCA(certFile, keyFile, serialFile, name string, expireDays int) (*CA, bool, error) { + if ca, err := GetCA(certFile, keyFile, serialFile); err == nil { + return ca, false, err + } + ca, err := MakeSelfSignedCA(certFile, keyFile, serialFile, name, expireDays) + return ca, true, err +} + +// if serialFile is empty, a RandomSerialGenerator will be used +func GetCA(certFile, keyFile, serialFile string) (*CA, error) { + caConfig, err := GetTLSCertificateConfig(certFile, keyFile) + if err != nil { + return nil, err + } + + var serialGenerator SerialGenerator + if len(serialFile) > 0 { + serialGenerator, err = NewSerialFileGenerator(serialFile) + if err != nil { + return nil, err + } + } else { + serialGenerator = &RandomSerialGenerator{} + } + + return &CA{ + SerialGenerator: serialGenerator, + Config: caConfig, + }, nil +} + +func GetCAFromBytes(certBytes, keyBytes []byte) (*CA, error) { + caConfig, err := GetTLSCertificateConfigFromBytes(certBytes, keyBytes) + if err != nil { + return nil, err + } + + return &CA{ + SerialGenerator: &RandomSerialGenerator{}, + Config: caConfig, + }, nil +} + +// if serialFile is empty, a RandomSerialGenerator will be used +func MakeSelfSignedCA(certFile, keyFile, serialFile, name string, expireDays int) (*CA, error) { + klog.V(2).Infof("Generating new CA for %s cert, and key in %s, %s", name, certFile, keyFile) + + caConfig, err := MakeSelfSignedCAConfig(name, expireDays) + if err != nil { + return nil, err + } + if err := caConfig.WriteCertConfigFile(certFile, keyFile); err != nil { + return nil, err + } + + var serialGenerator SerialGenerator + if len(serialFile) > 0 { + // create / overwrite the serial file with a zero padded hex value (ending in a newline to have a valid file) + if err := os.WriteFile(serialFile, []byte("00\n"), 0644); err != nil { + return nil, err + } + serialGenerator, err = NewSerialFileGenerator(serialFile) + if err != nil { + return nil, err + } + } else { + serialGenerator = &RandomSerialGenerator{} + } + + return &CA{ + SerialGenerator: serialGenerator, + Config: caConfig, + }, nil +} + +func MakeSelfSignedCAConfig(name string, expireDays int) (*TLSCertificateConfig, error) { + subject := pkix.Name{CommonName: name} + return MakeSelfSignedCAConfigForSubject(subject, expireDays) +} + +func MakeSelfSignedCAConfigForSubject(subject pkix.Name, expireDays int) (*TLSCertificateConfig, error) { + var caLifetimeInDays = DefaultCACertificateLifetimeInDays + if expireDays > 0 { + caLifetimeInDays = expireDays + } + + if caLifetimeInDays > DefaultCACertificateLifetimeInDays { + warnAboutCertificateLifeTime(subject.CommonName, DefaultCACertificateLifetimeInDays) + } + + caLifetime := time.Duration(caLifetimeInDays) * 24 * time.Hour + return makeSelfSignedCAConfigForSubjectAndDuration(subject, caLifetime) +} + +func MakeSelfSignedCAConfigForDuration(name string, caLifetime time.Duration) (*TLSCertificateConfig, error) { + subject := pkix.Name{CommonName: name} + return makeSelfSignedCAConfigForSubjectAndDuration(subject, caLifetime) +} + +func makeSelfSignedCAConfigForSubjectAndDuration(subject pkix.Name, caLifetime time.Duration) (*TLSCertificateConfig, error) { + // Create CA cert + rootcaPublicKey, rootcaPrivateKey, publicKeyHash, err := 
newKeyPairWithHash() + if err != nil { + return nil, err + } + // AuthorityKeyId and SubjectKeyId should match for a self-signed CA + authorityKeyId := publicKeyHash + subjectKeyId := publicKeyHash + rootcaTemplate := newSigningCertificateTemplateForDuration(subject, caLifetime, time.Now, authorityKeyId, subjectKeyId) + rootcaCert, err := signCertificate(rootcaTemplate, rootcaPublicKey, rootcaTemplate, rootcaPrivateKey) + if err != nil { + return nil, err + } + caConfig := &TLSCertificateConfig{ + Certs: []*x509.Certificate{rootcaCert}, + Key: rootcaPrivateKey, + } + return caConfig, nil +} + +func MakeCAConfigForDuration(name string, caLifetime time.Duration, issuer *CA) (*TLSCertificateConfig, error) { + // Create CA cert + signerPublicKey, signerPrivateKey, publicKeyHash, err := newKeyPairWithHash() + if err != nil { + return nil, err + } + authorityKeyId := issuer.Config.Certs[0].SubjectKeyId + subjectKeyId := publicKeyHash + signerTemplate := newSigningCertificateTemplateForDuration(pkix.Name{CommonName: name}, caLifetime, time.Now, authorityKeyId, subjectKeyId) + signerCert, err := issuer.signCertificate(signerTemplate, signerPublicKey) + if err != nil { + return nil, err + } + signerConfig := &TLSCertificateConfig{ + Certs: append([]*x509.Certificate{signerCert}, issuer.Config.Certs...), + Key: signerPrivateKey, + } + return signerConfig, nil +} + +// EnsureSubCA returns a subCA signed by the `ca`, whether it was created +// (as opposed to pre-existing), and any error that might occur during the subCA +// creation. +// If serialFile is an empty string, a RandomSerialGenerator will be used. +func (ca *CA) EnsureSubCA(certFile, keyFile, serialFile, name string, expireDays int) (*CA, bool, error) { + if subCA, err := GetCA(certFile, keyFile, serialFile); err == nil { + return subCA, false, err + } + subCA, err := ca.MakeAndWriteSubCA(certFile, keyFile, serialFile, name, expireDays) + return subCA, true, err +} + +// MakeAndWriteSubCA returns a new sub-CA configuration. New cert/key pair is generated +// while using this function. +// If serialFile is an empty string, a RandomSerialGenerator will be used. 
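+//
+// Illustrative usage (a sketch; the file paths and names are hypothetical):
+//
+//	rootCA, _, err := EnsureCA("ca.crt", "ca.key", "ca.serial", "example-root", 365)
+//	if err != nil {
+//		return err
+//	}
+//	subCA, created, err := rootCA.EnsureSubCA("sub-ca.crt", "sub-ca.key", "", "example-sub", 180)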
+func (ca *CA) MakeAndWriteSubCA(certFile, keyFile, serialFile, name string, expireDays int) (*CA, error) { + klog.V(4).Infof("Generating sub-CA certificate in %s, key in %s, serial in %s", certFile, keyFile, serialFile) + + subCAConfig, err := MakeCAConfigForDuration(name, time.Duration(expireDays)*time.Hour*24, ca) + if err != nil { + return nil, err + } + + if err := subCAConfig.WriteCertConfigFile(certFile, keyFile); err != nil { + return nil, err + } + + var serialGenerator SerialGenerator + if len(serialFile) > 0 { + // create / overwrite the serial file with a zero padded hex value (ending in a newline to have a valid file) + if err := os.WriteFile(serialFile, []byte("00\n"), 0644); err != nil { + return nil, err + } + + serialGenerator, err = NewSerialFileGenerator(serialFile) + if err != nil { + return nil, err + } + } else { + serialGenerator = &RandomSerialGenerator{} + } + + return &CA{ + Config: subCAConfig, + SerialGenerator: serialGenerator, + }, nil +} + +func (ca *CA) EnsureServerCert(certFile, keyFile string, hostnames sets.String, expireDays int) (*TLSCertificateConfig, bool, error) { + certConfig, err := GetServerCert(certFile, keyFile, hostnames) + if err != nil { + certConfig, err = ca.MakeAndWriteServerCert(certFile, keyFile, hostnames, expireDays) + return certConfig, true, err + } + + return certConfig, false, nil +} + +func GetServerCert(certFile, keyFile string, hostnames sets.String) (*TLSCertificateConfig, error) { + server, err := GetTLSCertificateConfig(certFile, keyFile) + if err != nil { + return nil, err + } + + cert := server.Certs[0] + ips, dns := IPAddressesDNSNames(hostnames.List()) + missingIps := ipsNotInSlice(ips, cert.IPAddresses) + missingDns := stringsNotInSlice(dns, cert.DNSNames) + if len(missingIps) == 0 && len(missingDns) == 0 { + klog.V(4).Infof("Found existing server certificate in %s", certFile) + return server, nil + } + + return nil, fmt.Errorf("Existing server certificate in %s was missing some hostnames (%v) or IP addresses (%v).", certFile, missingDns, missingIps) +} + +func (ca *CA) MakeAndWriteServerCert(certFile, keyFile string, hostnames sets.String, expireDays int) (*TLSCertificateConfig, error) { + klog.V(4).Infof("Generating server certificate in %s, key in %s", certFile, keyFile) + + server, err := ca.MakeServerCert(hostnames, expireDays) + if err != nil { + return nil, err + } + if err := server.WriteCertConfigFile(certFile, keyFile); err != nil { + return server, err + } + return server, nil +} + +// CertificateExtensionFunc is passed a certificate that it may extend, or return an error +// if the extension attempt failed. 
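+//
+// For example (illustrative only), an extension function matching this
+// signature could add client-auth key usage to a server template before
+// it is signed:
+//
+//	withClientAuth := func(cert *x509.Certificate) error {
+//		cert.ExtKeyUsage = append(cert.ExtKeyUsage, x509.ExtKeyUsageClientAuth)
+//		return nil
+//	}
+//	serverCfg, err := ca.MakeServerCert(hostnames, 365, withClientAuth)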
+type CertificateExtensionFunc func(*x509.Certificate) error + +func (ca *CA) MakeServerCert(hostnames sets.String, expireDays int, fns ...CertificateExtensionFunc) (*TLSCertificateConfig, error) { + serverPublicKey, serverPrivateKey, publicKeyHash, _ := newKeyPairWithHash() + authorityKeyId := ca.Config.Certs[0].SubjectKeyId + subjectKeyId := publicKeyHash + serverTemplate := newServerCertificateTemplate(pkix.Name{CommonName: hostnames.List()[0]}, hostnames.List(), expireDays, time.Now, authorityKeyId, subjectKeyId) + for _, fn := range fns { + if err := fn(serverTemplate); err != nil { + return nil, err + } + } + serverCrt, err := ca.signCertificate(serverTemplate, serverPublicKey) + if err != nil { + return nil, err + } + server := &TLSCertificateConfig{ + Certs: append([]*x509.Certificate{serverCrt}, ca.Config.Certs...), + Key: serverPrivateKey, + } + return server, nil +} + +func (ca *CA) MakeServerCertForDuration(hostnames sets.String, lifetime time.Duration, fns ...CertificateExtensionFunc) (*TLSCertificateConfig, error) { + serverPublicKey, serverPrivateKey, publicKeyHash, _ := newKeyPairWithHash() + authorityKeyId := ca.Config.Certs[0].SubjectKeyId + subjectKeyId := publicKeyHash + serverTemplate := newServerCertificateTemplateForDuration(pkix.Name{CommonName: hostnames.List()[0]}, hostnames.List(), lifetime, time.Now, authorityKeyId, subjectKeyId) + for _, fn := range fns { + if err := fn(serverTemplate); err != nil { + return nil, err + } + } + serverCrt, err := ca.signCertificate(serverTemplate, serverPublicKey) + if err != nil { + return nil, err + } + server := &TLSCertificateConfig{ + Certs: append([]*x509.Certificate{serverCrt}, ca.Config.Certs...), + Key: serverPrivateKey, + } + return server, nil +} + +func (ca *CA) EnsureClientCertificate(certFile, keyFile string, u user.Info, expireDays int) (*TLSCertificateConfig, bool, error) { + certConfig, err := GetClientCertificate(certFile, keyFile, u) + if err != nil { + certConfig, err = ca.MakeClientCertificate(certFile, keyFile, u, expireDays) + return certConfig, true, err // true indicates we wrote the files. 
+	}
+	return certConfig, false, nil
+}
+
+func GetClientCertificate(certFile, keyFile string, u user.Info) (*TLSCertificateConfig, error) {
+	certConfig, err := GetTLSCertificateConfig(certFile, keyFile)
+	if err != nil {
+		return nil, err
+	}
+
+	if subject := certConfig.Certs[0].Subject; subjectChanged(subject, userToSubject(u)) {
+		return nil, fmt.Errorf("existing client certificate in %s was issued for a different Subject (%s)",
+			certFile, subject)
+	}
+
+	return certConfig, nil
+}
+
+func subjectChanged(existing, expected pkix.Name) bool {
+	sort.Strings(existing.Organization)
+	sort.Strings(expected.Organization)
+
+	return existing.CommonName != expected.CommonName ||
+		existing.SerialNumber != expected.SerialNumber ||
+		!reflect.DeepEqual(existing.Organization, expected.Organization)
+}
+
+func (ca *CA) MakeClientCertificate(certFile, keyFile string, u user.Info, expireDays int) (*TLSCertificateConfig, error) {
+	klog.V(4).Infof("Generating client cert in %s and key in %s", certFile, keyFile)
+	// ensure parent dirs
+	if err := os.MkdirAll(filepath.Dir(certFile), os.FileMode(0755)); err != nil {
+		return nil, err
+	}
+	if err := os.MkdirAll(filepath.Dir(keyFile), os.FileMode(0755)); err != nil {
+		return nil, err
+	}
+
+	clientPublicKey, clientPrivateKey, _ := NewKeyPair()
+	clientTemplate := newClientCertificateTemplate(userToSubject(u), expireDays, time.Now)
+	clientCrt, err := ca.signCertificate(clientTemplate, clientPublicKey)
+	if err != nil {
+		return nil, err
+	}
+
+	certData, err := EncodeCertificates(clientCrt)
+	if err != nil {
+		return nil, err
+	}
+	keyData, err := encodeKey(clientPrivateKey)
+	if err != nil {
+		return nil, err
+	}
+
+	if err = os.WriteFile(certFile, certData, os.FileMode(0644)); err != nil {
+		return nil, err
+	}
+	if err = os.WriteFile(keyFile, keyData, os.FileMode(0600)); err != nil {
+		return nil, err
+	}
+
+	return GetTLSCertificateConfig(certFile, keyFile)
+}
+
+func (ca *CA) MakeClientCertificateForDuration(u user.Info, lifetime time.Duration) (*TLSCertificateConfig, error) {
+	clientPublicKey, clientPrivateKey, _ := NewKeyPair()
+	clientTemplate := newClientCertificateTemplateForDuration(userToSubject(u), lifetime, time.Now)
+	clientCrt, err := ca.signCertificate(clientTemplate, clientPublicKey)
+	if err != nil {
+		return nil, err
+	}
+
+	certData, err := EncodeCertificates(clientCrt)
+	if err != nil {
+		return nil, err
+	}
+	keyData, err := encodeKey(clientPrivateKey)
+	if err != nil {
+		return nil, err
+	}
+
+	return GetTLSCertificateConfigFromBytes(certData, keyData)
+}
+
+type sortedForDER []string
+
+func (s sortedForDER) Len() int {
+	return len(s)
+}
+func (s sortedForDER) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+func (s sortedForDER) Less(i, j int) bool {
+	l1 := len(s[i])
+	l2 := len(s[j])
+	if l1 == l2 {
+		return s[i] < s[j]
+	}
+	return l1 < l2
+}
+
+func userToSubject(u user.Info) pkix.Name {
+	// Ok, we are going to order groups in a peculiar way here to work around
+	// two bugs: one in golang (https://github.com/golang/go/issues/24254), which
+	// incorrectly encodes Multivalued RDNs, and another in GNUTLS clients,
+	// which are too picky (https://gitlab.com/gnutls/gnutls/issues/403)
+	// and try to "correct" this issue when reading client certs.
+	//
+	// This workaround should be killed once Golang's pkix module is fixed to
+	// generate a correct DER encoding.
+	//
+	// The workaround relies on the fact that the first octet that differs
+	// between the encodings of two group RDNs will end up being the encoded
+	// length, which is directly related to the group name's length. So we'll
+	// sort such that the shortest names come first.
+	ugroups := u.GetGroups()
+	groups := make([]string, len(ugroups))
+	copy(groups, ugroups)
+	sort.Sort(sortedForDER(groups))
+
+	return pkix.Name{
+		CommonName:   u.GetName(),
+		SerialNumber: u.GetUID(),
+		Organization: groups,
+	}
+}
+
+func (ca *CA) signCertificate(template *x509.Certificate, requestKey crypto.PublicKey) (*x509.Certificate, error) {
+	// Increment and persist serial
+	serial, err := ca.SerialGenerator.Next(template)
+	if err != nil {
+		return nil, err
+	}
+	template.SerialNumber = big.NewInt(serial)
+	return signCertificate(template, requestKey, ca.Config.Certs[0], ca.Config.Key)
+}
+
+func NewKeyPair() (crypto.PublicKey, crypto.PrivateKey, error) {
+	return newRSAKeyPair()
+}
+
+func newKeyPairWithHash() (crypto.PublicKey, crypto.PrivateKey, []byte, error) {
+	publicKey, privateKey, err := newRSAKeyPair()
+	var publicKeyHash []byte
+	if err == nil {
+		hash := sha1.New()
+		hash.Write(publicKey.N.Bytes())
+		publicKeyHash = hash.Sum(nil)
+	}
+	return publicKey, privateKey, publicKeyHash, err
+}
+
+func newRSAKeyPair() (*rsa.PublicKey, *rsa.PrivateKey, error) {
+	privateKey, err := rsa.GenerateKey(rand.Reader, keyBits)
+	if err != nil {
+		return nil, nil, err
+	}
+	return &privateKey.PublicKey, privateKey, nil
+}
+
+// Can be used for CA or intermediate signing certs
+func newSigningCertificateTemplateForDuration(subject pkix.Name, caLifetime time.Duration, currentTime func() time.Time, authorityKeyId, subjectKeyId []byte) *x509.Certificate {
+	return &x509.Certificate{
+		Subject: subject,
+
+		SignatureAlgorithm: x509.SHA256WithRSA,
+
+		NotBefore: currentTime().Add(-1 * time.Second),
+		NotAfter:  currentTime().Add(caLifetime),
+
+		// Specify a random serial number to avoid the same issuer+serial
+		// number referring to different certs in a chain of trust if the
+		// signing certificate is ever rotated.
+ SerialNumber: big.NewInt(randomSerialNumber()), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + IsCA: true, + + AuthorityKeyId: authorityKeyId, + SubjectKeyId: subjectKeyId, + } +} + +// Can be used for ListenAndServeTLS +func newServerCertificateTemplate(subject pkix.Name, hosts []string, expireDays int, currentTime func() time.Time, authorityKeyId, subjectKeyId []byte) *x509.Certificate { + var lifetimeInDays = DefaultCertificateLifetimeInDays + if expireDays > 0 { + lifetimeInDays = expireDays + } + + if lifetimeInDays > DefaultCertificateLifetimeInDays { + warnAboutCertificateLifeTime(subject.CommonName, DefaultCertificateLifetimeInDays) + } + + lifetime := time.Duration(lifetimeInDays) * 24 * time.Hour + + return newServerCertificateTemplateForDuration(subject, hosts, lifetime, currentTime, authorityKeyId, subjectKeyId) +} + +// Can be used for ListenAndServeTLS +func newServerCertificateTemplateForDuration(subject pkix.Name, hosts []string, lifetime time.Duration, currentTime func() time.Time, authorityKeyId, subjectKeyId []byte) *x509.Certificate { + template := &x509.Certificate{ + Subject: subject, + + SignatureAlgorithm: x509.SHA256WithRSA, + + NotBefore: currentTime().Add(-1 * time.Second), + NotAfter: currentTime().Add(lifetime), + SerialNumber: big.NewInt(1), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + + AuthorityKeyId: authorityKeyId, + SubjectKeyId: subjectKeyId, + } + + template.IPAddresses, template.DNSNames = IPAddressesDNSNames(hosts) + + return template +} + +func IPAddressesDNSNames(hosts []string) ([]net.IP, []string) { + ips := []net.IP{} + dns := []string{} + for _, host := range hosts { + if ip := net.ParseIP(host); ip != nil { + ips = append(ips, ip) + } else { + dns = append(dns, host) + } + } + + // Include IP addresses as DNS subjectAltNames in the cert as well, for the sake of Python, Windows (< 10), and unnamed other libraries + // Ensure these technically invalid DNS subjectAltNames occur after the valid ones, to avoid triggering cert errors in Firefox + // See https://bugzilla.mozilla.org/show_bug.cgi?id=1148766 + for _, ip := range ips { + dns = append(dns, ip.String()) + } + + return ips, dns +} + +func CertsFromPEM(pemCerts []byte) ([]*x509.Certificate, error) { + ok := false + certs := []*x509.Certificate{} + for len(pemCerts) > 0 { + var block *pem.Block + block, pemCerts = pem.Decode(pemCerts) + if block == nil { + break + } + if block.Type != "CERTIFICATE" || len(block.Headers) != 0 { + continue + } + + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return certs, err + } + + certs = append(certs, cert) + ok = true + } + + if !ok { + return certs, errors.New("Could not read any certificates") + } + return certs, nil +} + +// Can be used as a certificate in http.Transport TLSClientConfig +func newClientCertificateTemplate(subject pkix.Name, expireDays int, currentTime func() time.Time) *x509.Certificate { + var lifetimeInDays = DefaultCertificateLifetimeInDays + if expireDays > 0 { + lifetimeInDays = expireDays + } + + if lifetimeInDays > DefaultCertificateLifetimeInDays { + warnAboutCertificateLifeTime(subject.CommonName, DefaultCertificateLifetimeInDays) + } + + lifetime := time.Duration(lifetimeInDays) * 24 * time.Hour + + return newClientCertificateTemplateForDuration(subject, lifetime, currentTime) +} + +// Can be 
used as a certificate in http.Transport TLSClientConfig +func newClientCertificateTemplateForDuration(subject pkix.Name, lifetime time.Duration, currentTime func() time.Time) *x509.Certificate { + return &x509.Certificate{ + Subject: subject, + + SignatureAlgorithm: x509.SHA256WithRSA, + + NotBefore: currentTime().Add(-1 * time.Second), + NotAfter: currentTime().Add(lifetime), + SerialNumber: big.NewInt(1), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth}, + BasicConstraintsValid: true, + } +} + +func warnAboutCertificateLifeTime(name string, defaultLifetimeInDays int) { + defaultLifetimeInYears := defaultLifetimeInDays / 365 + fmt.Fprintf(os.Stderr, "WARNING: Validity period of the certificate for %q is greater than %d years!\n", name, defaultLifetimeInYears) + fmt.Fprintln(os.Stderr, "WARNING: By security reasons it is strongly recommended to change this period and make it smaller!") +} + +func signCertificate(template *x509.Certificate, requestKey crypto.PublicKey, issuer *x509.Certificate, issuerKey crypto.PrivateKey) (*x509.Certificate, error) { + derBytes, err := x509.CreateCertificate(rand.Reader, template, issuer, requestKey, issuerKey) + if err != nil { + return nil, err + } + certs, err := x509.ParseCertificates(derBytes) + if err != nil { + return nil, err + } + if len(certs) != 1 { + return nil, errors.New("Expected a single certificate") + } + return certs[0], nil +} + +func EncodeCertificates(certs ...*x509.Certificate) ([]byte, error) { + b := bytes.Buffer{} + for _, cert := range certs { + if err := pem.Encode(&b, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}); err != nil { + return []byte{}, err + } + } + return b.Bytes(), nil +} +func encodeKey(key crypto.PrivateKey) ([]byte, error) { + b := bytes.Buffer{} + switch key := key.(type) { + case *ecdsa.PrivateKey: + keyBytes, err := x509.MarshalECPrivateKey(key) + if err != nil { + return []byte{}, err + } + if err := pem.Encode(&b, &pem.Block{Type: "EC PRIVATE KEY", Bytes: keyBytes}); err != nil { + return b.Bytes(), err + } + case *rsa.PrivateKey: + if err := pem.Encode(&b, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)}); err != nil { + return []byte{}, err + } + default: + return []byte{}, errors.New("Unrecognized key type") + + } + return b.Bytes(), nil +} + +func writeCertificates(f io.Writer, certs ...*x509.Certificate) error { + bytes, err := EncodeCertificates(certs...) 
+	if err != nil {
+		return err
+	}
+	if _, err := f.Write(bytes); err != nil {
+		return err
+	}
+
+	return nil
+}
+func writeKeyFile(f io.Writer, key crypto.PrivateKey) error {
+	bytes, err := encodeKey(key)
+	if err != nil {
+		return err
+	}
+	if _, err := f.Write(bytes); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func stringsNotInSlice(needles []string, haystack []string) []string {
+	missing := []string{}
+	for _, needle := range needles {
+		if !stringInSlice(needle, haystack) {
+			missing = append(missing, needle)
+		}
+	}
+	return missing
+}
+
+func stringInSlice(needle string, haystack []string) bool {
+	for _, straw := range haystack {
+		if needle == straw {
+			return true
+		}
+	}
+	return false
+}
+
+func ipsNotInSlice(needles []net.IP, haystack []net.IP) []net.IP {
+	missing := []net.IP{}
+	for _, needle := range needles {
+		if !ipInSlice(needle, haystack) {
+			missing = append(missing, needle)
+		}
+	}
+	return missing
+}
+
+func ipInSlice(needle net.IP, haystack []net.IP) bool {
+	for _, straw := range haystack {
+		if needle.Equal(straw) {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/crypto/rotation.go b/vendor/github.com/openshift/library-go/pkg/crypto/rotation.go
new file mode 100644
index 000000000..0aa127037
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/crypto/rotation.go
@@ -0,0 +1,20 @@
+package crypto
+
+import (
+	"crypto/x509"
+	"time"
+)
+
+// FilterExpiredCerts checks whether the certificates in the bundle are still valid, i.e. have not expired.
+// It returns a new bundle containing only the valid certificates.
+func FilterExpiredCerts(certs ...*x509.Certificate) []*x509.Certificate {
+	currentTime := time.Now()
+	var validCerts []*x509.Certificate
+	for _, c := range certs {
+		if c.NotAfter.After(currentTime) {
+			validCerts = append(validCerts, c)
+		}
+	}
+
+	return validCerts
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/condition/condition.go b/vendor/github.com/openshift/library-go/pkg/operator/condition/condition.go
new file mode 100644
index 000000000..1a522609a
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/condition/condition.go
@@ -0,0 +1,72 @@
+package condition
+
+const (
+	// ManagementStateDegradedConditionType is true when the operator ManagementState is not "Managed".
+	// Possible reasons are Unmanaged, Removed or Unknown. Any of these cases means the operator is not actively managing the operand.
+	// This condition is set to false when the ManagementState is set back to "Managed".
+	ManagementStateDegradedConditionType = "ManagementStateDegraded"
+
+	// UnsupportedConfigOverridesUpgradeableConditionType is true when the operator's unsupported config overrides change.
+	// The NoUnsupportedConfigOverrides reason means there are no unsupported config overrides.
+	// The UnsupportedConfigOverridesSet reason means unsupported config overrides are set, which might impact the ability
+	// of the operator to successfully upgrade its operand.
+	UnsupportedConfigOverridesUpgradeableConditionType = "UnsupportedConfigOverridesUpgradeable"
+
+	// MonitoringResourceControllerDegradedConditionType is true when the operator is unable to create or reconcile the ServiceMonitor
+	// CR resource, which is required by the monitoring operator to collect Prometheus data from the operator.
+	// When this condition is true and the ServiceMonitor is already created, it has no impact on collecting metrics.
+	// However, if the ServiceMonitor was not created, the metrics won't be available for collection until this condition
+	// is set to false.
+	// The condition is set to false automatically when the operator successfully synchronizes the ServiceMonitor resource.
+	MonitoringResourceControllerDegradedConditionType = "MonitoringResourceControllerDegraded"
+
+	// BackingResourceControllerDegradedConditionType is true when the operator is unable to create or reconcile the resources needed
+	// to successfully run the installer pods (installer CRB and SA). If these were already created, this condition is not fatal;
+	// however, if the resources were not created, installer pod creation will fail.
+	// This condition is set to false when the operator can successfully synchronize the installer SA and CRB.
+	BackingResourceControllerDegradedConditionType = "BackingResourceControllerDegraded"
+
+	// StaticPodsDegradedConditionType is true when the operator observes errors when installing the new revision of static pods.
+	// This condition reports the Error reason when the pods are terminated, not ready, or waiting, during which the operand quality of service is degraded.
+	// This condition is set to false when the pods change state to running and are observed ready.
+	StaticPodsDegradedConditionType = "StaticPodsDegraded"
+
+	// StaticPodsAvailableConditionType is true when the static pod is available on at least one node.
+	StaticPodsAvailableConditionType = "StaticPodsAvailable"
+
+	// ConfigObservationDegradedConditionType is true when the operator failed to observe or process a configuration change.
+	// This is not a transient condition; normally a correction or manual intervention is required on the config custom resource.
+	ConfigObservationDegradedConditionType = "ConfigObservationDegraded"
+
+	// ResourceSyncControllerDegradedConditionType is true when the operator failed to synchronize one or more secrets or config maps required
+	// to run the operand. The operand's ability to provide service might be affected by this condition.
+	// This condition is set to false when the operator is able to create secrets and config maps.
+	ResourceSyncControllerDegradedConditionType = "ResourceSyncControllerDegraded"
+
+	// CertRotationDegradedConditionTypeFmt is true when the operator failed to properly rotate one or more certificates required by the operand.
+	// The RotationError reason is given with a message describing the details of this failure. This condition can be fatal when ignored, as the
+	// existing certificate(s) can expire, and without rotating/renewing them manual recovery might be required to fix the cluster.
+	CertRotationDegradedConditionTypeFmt = "CertRotation_%s_Degraded"
+
+	// InstallerControllerDegradedConditionType is true when the operator is not able to create new installer pods, so new revisions
+	// cannot be rolled out. This might happen when one or more required secrets or config maps do not exist.
+	// Once the missing secret or config map becomes available, this condition is automatically set to false.
+	InstallerControllerDegradedConditionType = "InstallerControllerDegraded"
+
+	// NodeInstallerDegradedConditionType is true when the operator is not able to create new installer pods because there are no schedulable nodes
+	// available to run the installer pods.
+	// The AllNodesAtLatestRevision reason is set when all master nodes are updated to the latest revision; it is false when some masters are pending a revision.
+	// The ZeroNodesActive reason is set to True when no active master nodes are observed, and to False when there is at least one active master node.
+	NodeInstallerDegradedConditionType = "NodeInstallerDegraded"
+
+	// NodeInstallerProgressingConditionType is true when the operator is moving nodes to a new revision.
+	NodeInstallerProgressingConditionType = "NodeInstallerProgressing"
+
+	// RevisionControllerDegradedConditionType is true when the operator is not able to create a new desired revision because an error occurred when
+	// the operator attempted to create the required resource(s) (secrets, configmaps, ...).
+	// This condition means no new revision will be created.
+	RevisionControllerDegradedConditionType = "RevisionControllerDegraded"
+
+	// NodeControllerDegradedConditionType is true when the operator observed a master node that is not ready.
+	// Note that a node is not ready when its Condition.NodeReady is not set to true.
+	NodeControllerDegradedConditionType = "NodeControllerDegraded"
+)
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go
new file mode 100644
index 000000000..3b9f61180
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/config_observer_controller.go
@@ -0,0 +1,284 @@
+package configobserver
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/imdario/mergo"
+	"k8s.io/klog/v2"
+
+	"k8s.io/apimachinery/pkg/api/equality"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/diff"
+	"k8s.io/apimachinery/pkg/util/rand"
+	"k8s.io/client-go/tools/cache"
+
+	operatorv1 "github.com/openshift/api/operator/v1"
+
+	"github.com/openshift/library-go/pkg/controller/factory"
+	"github.com/openshift/library-go/pkg/operator/condition"
+	"github.com/openshift/library-go/pkg/operator/events"
+	"github.com/openshift/library-go/pkg/operator/management"
+	"github.com/openshift/library-go/pkg/operator/resourcesynccontroller"
+	"github.com/openshift/library-go/pkg/operator/v1helpers"
+)
+
+// Listers is an interface which will be passed to the config observer funcs. It is expected to be hard-cast to the "correct" type.
+type Listers interface {
+	// ResourceSyncer can be used to copy content from one namespace to another
+	ResourceSyncer() resourcesynccontroller.ResourceSyncer
+	PreRunHasSynced() []cache.InformerSynced
+}
+
+// ObserveConfigFunc observes configuration and returns the observedConfig. This function should not return an
+// observedConfig that would cause the service being managed by the operator to crash. For example, if a required
+// configuration key cannot be observed, consider reusing the configuration key's previous value. Errors that occur
+// while attempting to generate the observedConfig should be returned in the errs slice.
+type ObserveConfigFunc func(listers Listers, recorder events.Recorder, existingConfig map[string]interface{}) (observedConfig map[string]interface{}, errs []error)
+
+type ConfigObserver struct {
+	// observers are called in an undefined order and their results are merged to
+	// determine the observed configuration.
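+	//
+	// For example (illustrative values), one observer returning
+	//	{"servingInfo":{"bindAddress":"0.0.0.0:8443"}}
+	// and another returning
+	//	{"servingInfo":{"minTLSVersion":"VersionTLS12"}}
+	// merge into a single config carrying both keys; sync also merges in
+	// reverse order and reports an error when the two results differ
+	// (non-deterministic observation).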
+ observers []ObserveConfigFunc + + operatorClient v1helpers.OperatorClient + + // listers are used by config observers to retrieve necessary resources + listers Listers + + nestedConfigPath []string + degradedConditionType string +} + +func NewConfigObserver( + operatorClient v1helpers.OperatorClient, + eventRecorder events.Recorder, + listers Listers, + informers []factory.Informer, + observers ...ObserveConfigFunc, +) factory.Controller { + return NewNestedConfigObserver( + operatorClient, + eventRecorder, + listers, + informers, + nil, + "", + observers..., + ) +} + +// NewNestedConfigObserver creates a config observer that watches changes to a nested field (nestedConfigPath) in the config. +// Useful when the config is shared across multiple controllers in the same process. +// +// Example: +// +// Given the following configuration, you could run two separate controllers and point each to its own section. +// The first controller would be responsible for "oauthAPIServer" and the second for "oauthServer" section. +// +// "observedConfig": { +// "oauthAPIServer": { +// "apiServerArguments": {"tls-min-version": "VersionTLS12"} +// }, +// "oauthServer": { +// "corsAllowedOrigins": [ "//127\\.0\\.0\\.1(:|$)","//localhost(:|$)"] +// } +// } +// +// oauthAPIController := NewNestedConfigObserver(..., []string{"oauthAPIServer"} +// oauthServerController := NewNestedConfigObserver(..., []string{"oauthServer"} +func NewNestedConfigObserver( + operatorClient v1helpers.OperatorClient, + eventRecorder events.Recorder, + listers Listers, + informers []factory.Informer, + nestedConfigPath []string, + degradedConditionPrefix string, + observers ...ObserveConfigFunc, +) factory.Controller { + c := &ConfigObserver{ + operatorClient: operatorClient, + observers: observers, + listers: listers, + nestedConfigPath: nestedConfigPath, + degradedConditionType: degradedConditionPrefix + condition.ConfigObservationDegradedConditionType, + } + + return factory.New().ResyncEvery(time.Minute).WithSync(c.sync).WithInformers(append(informers, listersToInformer(listers)...)...).ToController("ConfigObserver", eventRecorder.WithComponentSuffix("config-observer")) +} + +// sync reacts to a change in prereqs by finding information that is required to match another value in the cluster. This +// must be information that is logically "owned" by another component. +func (c ConfigObserver) sync(ctx context.Context, syncCtx factory.SyncContext) error { + originalSpec, _, _, err := c.operatorClient.GetOperatorState() + if management.IsOperatorRemovable() && apierrors.IsNotFound(err) { + return nil + } + if err != nil { + return err + } + spec := originalSpec.DeepCopy() + + // don't worry about errors. If we can't decode, we'll simply stomp over the field. + existingConfig := map[string]interface{}{} + if err := json.NewDecoder(bytes.NewBuffer(spec.ObservedConfig.Raw)).Decode(&existingConfig); err != nil { + klog.V(4).Infof("decode of existing config failed with error: %v", err) + } + + var errs []error + var observedConfigs []map[string]interface{} + for _, i := range rand.Perm(len(c.observers)) { + var currErrs []error + observedConfig, currErrs := c.observers[i](c.listers, syncCtx.Recorder(), existingConfig) + observedConfigs = append(observedConfigs, observedConfig) + errs = append(errs, currErrs...) 
+ } + + mergedObservedConfig := map[string]interface{}{} + for _, observedConfig := range observedConfigs { + if err := mergo.Merge(&mergedObservedConfig, observedConfig); err != nil { + klog.Warningf("merging observed config failed: %v", err) + } + } + + reverseMergedObservedConfig := map[string]interface{}{} + for i := len(observedConfigs) - 1; i >= 0; i-- { + if err := mergo.Merge(&reverseMergedObservedConfig, observedConfigs[i]); err != nil { + klog.Warningf("merging observed config failed: %v", err) + } + } + + if !equality.Semantic.DeepEqual(mergedObservedConfig, reverseMergedObservedConfig) { + errs = append(errs, errors.New("non-deterministic config observation detected")) + } + + if err := c.updateObservedConfig(ctx, syncCtx, existingConfig, mergedObservedConfig); err != nil { + errs = []error{err} + } + configError := v1helpers.NewMultiLineAggregate(errs) + + // update failing condition + cond := operatorv1.OperatorCondition{ + Type: c.degradedConditionType, + Status: operatorv1.ConditionFalse, + } + if configError != nil { + cond.Status = operatorv1.ConditionTrue + cond.Reason = "Error" + cond.Message = configError.Error() + } + if _, _, updateError := v1helpers.UpdateStatus(ctx, c.operatorClient, v1helpers.UpdateConditionFn(cond)); updateError != nil { + return updateError + } + + return configError +} + +func (c ConfigObserver) updateObservedConfig(ctx context.Context, syncCtx factory.SyncContext, existingConfig map[string]interface{}, mergedObservedConfig map[string]interface{}) error { + if len(c.nestedConfigPath) == 0 { + if !equality.Semantic.DeepEqual(existingConfig, mergedObservedConfig) { + syncCtx.Recorder().Eventf("ObservedConfigChanged", "Writing updated observed config: %v", diff.ObjectDiff(existingConfig, mergedObservedConfig)) + return c.updateConfig(ctx, syncCtx, mergedObservedConfig, v1helpers.UpdateObservedConfigFn) + } + return nil + } + + existingConfigNested, _, err := unstructured.NestedMap(existingConfig, c.nestedConfigPath...) + if err != nil { + return fmt.Errorf("unable to extract the config under %v key, err %v", c.nestedConfigPath, err) + } + mergedObservedConfigNested, _, err := unstructured.NestedMap(mergedObservedConfig, c.nestedConfigPath...) + if err != nil { + return fmt.Errorf("unable to extract the merged config under %v, err %v", c.nestedConfigPath, err) + } + if !equality.Semantic.DeepEqual(existingConfigNested, mergedObservedConfigNested) { + syncCtx.Recorder().Eventf("ObservedConfigChanged", "Writing updated section (%q) of observed config: %q", strings.Join(c.nestedConfigPath, "/"), diff.ObjectDiff(existingConfigNested, mergedObservedConfigNested)) + return c.updateConfig(ctx, syncCtx, mergedObservedConfigNested, c.updateNestedConfigHelper) + } + return nil +} + +type updateObservedConfigFn func(config map[string]interface{}) v1helpers.UpdateOperatorSpecFunc + +func (c ConfigObserver) updateConfig(ctx context.Context, syncCtx factory.SyncContext, updatedMaybeNestedConfig map[string]interface{}, updateConfigHelper updateObservedConfigFn) error { + if _, _, err := v1helpers.UpdateSpec(ctx, c.operatorClient, updateConfigHelper(updatedMaybeNestedConfig)); err != nil { + // At this point we failed to write the updated config. If we are permanently broken, do not pile the errors from observers + // but instead reset the errors and only report single error condition. 
+ syncCtx.Recorder().Warningf("ObservedConfigWriteError", "Failed to write observed config: %v", err) + return fmt.Errorf("error writing updated observed config: %v", err) + } + return nil +} + +// updateNestedConfigHelper returns a helper function for updating the nested config. +func (c ConfigObserver) updateNestedConfigHelper(updatedNestedConfig map[string]interface{}) v1helpers.UpdateOperatorSpecFunc { + return func(currentSpec *operatorv1.OperatorSpec) error { + existingConfig := map[string]interface{}{} + if err := json.NewDecoder(bytes.NewBuffer(currentSpec.ObservedConfig.Raw)).Decode(&existingConfig); err != nil { + klog.V(4).Infof("decode of existing config failed with error: %v", err) + } + if err := unstructured.SetNestedField(existingConfig, updatedNestedConfig, c.nestedConfigPath...); err != nil { + return fmt.Errorf("unable to set the nested (%q) observed config: %v", strings.Join(c.nestedConfigPath, "/"), err) + } + currentSpec.ObservedConfig = runtime.RawExtension{Object: &unstructured.Unstructured{Object: existingConfig}} + return nil + } +} + +// listersToInformer converts the Listers interface to informer with empty AddEventHandler as we only care about synced caches in the Run. +func listersToInformer(l Listers) []factory.Informer { + result := make([]factory.Informer, len(l.PreRunHasSynced())) + for i := range l.PreRunHasSynced() { + result[i] = &listerInformer{cacheSynced: l.PreRunHasSynced()[i]} + } + return result +} + +type listerInformer struct { + cacheSynced cache.InformerSynced +} + +func (l *listerInformer) AddEventHandler(cache.ResourceEventHandler) (cache.ResourceEventHandlerRegistration, error) { + return nil, nil +} + +func (l *listerInformer) HasSynced() bool { + return l.cacheSynced() +} + +// WithPrefix adds a prefix to the path the input observer would otherwise observe into +func WithPrefix(observer ObserveConfigFunc, prefix ...string) ObserveConfigFunc { + if len(prefix) == 0 { + return observer + } + + return func(listers Listers, recorder events.Recorder, existingConfig map[string]interface{}) (map[string]interface{}, []error) { + errs := []error{} + + nestedExistingConfig, _, err := unstructured.NestedMap(existingConfig, prefix...) + if err != nil { + errs = append(errs, err) + } + + orig, observerErrs := observer(listers, recorder, nestedExistingConfig) + errs = append(errs, observerErrs...) + + if orig == nil { + return nil, errs + } + + ret := map[string]interface{}{} + if err := unstructured.SetNestedField(ret, orig, prefix...); err != nil { + errs = append(errs, err) + } + return ret, errs + + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/featuregate.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/featuregate.go new file mode 100644 index 000000000..5ff0f3af0 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/featuregate.go @@ -0,0 +1,47 @@ +package featuregates + +import ( + "fmt" + configv1 "github.com/openshift/api/config/v1" + "k8s.io/apimachinery/pkg/util/sets" +) + +// FeatureGate indicates whether a given feature is enabled or not +// This interface is heavily influenced by k8s.io/component-base, but not exactly compatible. +type FeatureGate interface { + // Enabled returns true if the key is enabled. + Enabled(key configv1.FeatureGateName) bool + // KnownFeatures returns a slice of strings describing the FeatureGate's known features. 
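+	//
+	// Illustrative usage of the interface as a whole (a sketch; "ExampleGate"
+	// is a hypothetical gate name):
+	//
+	//	fg := NewFeatureGate([]configv1.FeatureGateName{"ExampleGate"}, nil)
+	//	if fg.Enabled("ExampleGate") { /* gated behavior */ }
+	//
+	// Note that Enabled panics for a name that is in neither the enabled nor
+	// the disabled set, so callers should restrict themselves to KnownFeatures.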
+ KnownFeatures() []configv1.FeatureGateName +} + +type featureGate struct { + enabled sets.Set[configv1.FeatureGateName] + disabled sets.Set[configv1.FeatureGateName] +} + +func NewFeatureGate(enabled, disabled []configv1.FeatureGateName) FeatureGate { + return &featureGate{ + enabled: sets.New[configv1.FeatureGateName](enabled...), + disabled: sets.New[configv1.FeatureGateName](disabled...), + } +} + +func (f *featureGate) Enabled(key configv1.FeatureGateName) bool { + if f.enabled.Has(key) { + return true + } + if f.disabled.Has(key) { + return false + } + + panic(fmt.Errorf("feature %q is not registered in FeatureGates %v", key, f.KnownFeatures())) +} + +func (f *featureGate) KnownFeatures() []configv1.FeatureGateName { + allKnown := sets.NewString() + allKnown.Insert(FeatureGateNamesToStrings(f.enabled.UnsortedList())...) + allKnown.Insert(FeatureGateNamesToStrings(f.disabled.UnsortedList())...) + + return StringsToFeatureGateNames(allKnown.List()) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/hardcoded_featuregate_reader.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/hardcoded_featuregate_reader.go new file mode 100644 index 000000000..58ae71763 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/hardcoded_featuregate_reader.go @@ -0,0 +1,78 @@ +package featuregates + +import ( + "context" + "fmt" + + configv1 "github.com/openshift/api/config/v1" +) + +type hardcodedFeatureGateAccess struct { + enabled []configv1.FeatureGateName + disabled []configv1.FeatureGateName + readErr error + + initialFeatureGatesObserved chan struct{} +} + +// NewHardcodedFeatureGateAccess returns a FeatureGateAccess that is always initialized and always +// returns the provided feature gates. +func NewHardcodedFeatureGateAccess(enabled, disabled []configv1.FeatureGateName) FeatureGateAccess { + initialFeatureGatesObserved := make(chan struct{}) + close(initialFeatureGatesObserved) + c := &hardcodedFeatureGateAccess{ + enabled: enabled, + disabled: disabled, + initialFeatureGatesObserved: initialFeatureGatesObserved, + } + + return c +} + +// NewHardcodedFeatureGateAccessForTesting returns a FeatureGateAccess that returns stub responses +// using caller-supplied values. +func NewHardcodedFeatureGateAccessForTesting(enabled, disabled []configv1.FeatureGateName, initialFeatureGatesObserved chan struct{}, readErr error) FeatureGateAccess { + return &hardcodedFeatureGateAccess{ + enabled: enabled, + disabled: disabled, + initialFeatureGatesObserved: initialFeatureGatesObserved, + readErr: readErr, + } +} + +func (c *hardcodedFeatureGateAccess) SetChangeHandler(featureGateChangeHandlerFn FeatureGateChangeHandlerFunc) { + // ignore +} + +func (c *hardcodedFeatureGateAccess) Run(ctx context.Context) { + // ignore +} + +func (c *hardcodedFeatureGateAccess) InitialFeatureGatesObserved() <-chan struct{} { + return c.initialFeatureGatesObserved +} + +func (c *hardcodedFeatureGateAccess) AreInitialFeatureGatesObserved() bool { + select { + case <-c.InitialFeatureGatesObserved(): + return true + default: + return false + } +} + +func (c *hardcodedFeatureGateAccess) CurrentFeatureGates() (FeatureGate, error) { + return NewFeatureGate(c.enabled, c.disabled), c.readErr +} + +// NewHardcodedFeatureGateAccessFromFeatureGate returns a FeatureGateAccess that is static and initialised from +// a populated FeatureGate status. +// If the desired version is missing, this will return an error. 
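+//
+// For tests that do not have a FeatureGate object at hand, the plain hardcoded
+// accessor can be used directly (a sketch; the gate names are hypothetical):
+//
+//	access := NewHardcodedFeatureGateAccess(
+//		[]configv1.FeatureGateName{"FeatureA"}, // enabled
+//		[]configv1.FeatureGateName{"FeatureB"}, // disabled
+//	)
+//	fg, err := access.CurrentFeatureGates()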
+func NewHardcodedFeatureGateAccessFromFeatureGate(featureGate *configv1.FeatureGate, desiredVersion string) (FeatureGateAccess, error) { + features, err := featuresFromFeatureGate(featureGate, desiredVersion) + if err != nil { + return nil, fmt.Errorf("unable to determine features: %w", err) + } + + return NewHardcodedFeatureGateAccess(features.Enabled, features.Disabled), nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates.go new file mode 100644 index 000000000..0f2cb85fd --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/observe_featuregates.go @@ -0,0 +1,118 @@ +package featuregates + +import ( + "fmt" + "reflect" + "strings" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/sets" + + configv1 "github.com/openshift/api/config/v1" + "github.com/openshift/library-go/pkg/operator/configobserver" + "github.com/openshift/library-go/pkg/operator/events" +) + +// NewObserveFeatureFlagsFunc produces a configobserver for feature gates. If non-nil, the featureWhitelist filters +// feature gates to a known subset (instead of everything). The featureBlacklist will stop certain features from making +// it through the list. The featureBlacklist should be empty, but for a brief time, some featuregates may need to skipped. +// @smarterclayton will live forever in shame for being the first to require this for "IPv6DualStack". +func NewObserveFeatureFlagsFunc(featureWhitelist sets.Set[configv1.FeatureGateName], featureBlacklist sets.Set[configv1.FeatureGateName], configPath []string, featureGateAccess FeatureGateAccess) configobserver.ObserveConfigFunc { + return (&featureFlags{ + allowAll: len(featureWhitelist) == 0, + featureWhitelist: featureWhitelist, + featureBlacklist: featureBlacklist, + configPath: configPath, + featureGateAccess: featureGateAccess, + }).ObserveFeatureFlags +} + +type featureFlags struct { + allowAll bool + featureWhitelist sets.Set[configv1.FeatureGateName] + // we add a forceDisableFeature list because we've now had bad featuregates break individual operators. Awesome. + featureBlacklist sets.Set[configv1.FeatureGateName] + configPath []string + featureGateAccess FeatureGateAccess +} + +// ObserveFeatureFlags fills in --feature-flags for the kube-apiserver +func (f *featureFlags) ObserveFeatureFlags(genericListers configobserver.Listers, recorder events.Recorder, existingConfig map[string]interface{}) (map[string]interface{}, []error) { + prunedExistingConfig := configobserver.Pruned(existingConfig, f.configPath) + + errs := []error{} + + if !f.featureGateAccess.AreInitialFeatureGatesObserved() { + // if we haven't observed featuregates yet, return the existing + return prunedExistingConfig, nil + } + + featureGates, err := f.featureGateAccess.CurrentFeatureGates() + if err != nil { + return prunedExistingConfig, append(errs, err) + } + observedConfig := map[string]interface{}{} + newConfigValue := f.getWhitelistedFeatureNames(featureGates) + + currentConfigValue, _, err := unstructured.NestedStringSlice(existingConfig, f.configPath...) 
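+	// currentConfigValue is only consulted to decide whether the
+	// "ObserveFeatureFlagsUpdated" event below should fire; the returned
+	// observed config is rebuilt from newConfigValue either way.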
+ if err != nil { + errs = append(errs, err) + // keep going on read error from existing config + } + if !reflect.DeepEqual(currentConfigValue, newConfigValue) { + recorder.Eventf("ObserveFeatureFlagsUpdated", "Updated %v to %s", strings.Join(f.configPath, "."), strings.Join(newConfigValue, ",")) + } + + if err := unstructured.SetNestedStringSlice(observedConfig, newConfigValue, f.configPath...); err != nil { + recorder.Warningf("ObserveFeatureFlags", "Failed setting %v: %v", strings.Join(f.configPath, "."), err) + return prunedExistingConfig, append(errs, err) + } + + return configobserver.Pruned(observedConfig, f.configPath), errs +} + +func (f *featureFlags) getWhitelistedFeatureNames(featureGates FeatureGate) []string { + newConfigValue := []string{} + formatEnabledFunc := func(fs configv1.FeatureGateName) string { + return fmt.Sprintf("%v=true", fs) + } + formatDisabledFunc := func(fs configv1.FeatureGateName) string { + return fmt.Sprintf("%v=false", fs) + } + + for _, knownFeatureGate := range featureGates.KnownFeatures() { + if f.featureBlacklist.Has(knownFeatureGate) { + continue + } + // only add whitelisted feature flags + if !f.allowAll && !f.featureWhitelist.Has(knownFeatureGate) { + continue + } + + if featureGates.Enabled(knownFeatureGate) { + newConfigValue = append(newConfigValue, formatEnabledFunc(knownFeatureGate)) + } else { + newConfigValue = append(newConfigValue, formatDisabledFunc(knownFeatureGate)) + } + } + + return newConfigValue +} + +func StringsToFeatureGateNames(in []string) []configv1.FeatureGateName { + out := []configv1.FeatureGateName{} + for _, curr := range in { + out = append(out, configv1.FeatureGateName(curr)) + } + + return out +} + +func FeatureGateNamesToStrings(in []configv1.FeatureGateName) []string { + out := []string{} + for _, curr := range in { + out = append(out, string(curr)) + } + + return out +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/simple_featuregate_reader.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/simple_featuregate_reader.go new file mode 100644 index 000000000..4b2caccd6 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/featuregates/simple_featuregate_reader.go @@ -0,0 +1,318 @@ +package featuregates + +import ( + "context" + "fmt" + "os" + "reflect" + "sync" + "time" + + configv1 "github.com/openshift/api/config/v1" + + v1 "github.com/openshift/client-go/config/informers/externalversions/config/v1" + configlistersv1 "github.com/openshift/client-go/config/listers/config/v1" + "github.com/openshift/library-go/pkg/operator/events" + apierrors "k8s.io/apimachinery/pkg/api/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + "k8s.io/klog/v2" +) + +type FeatureGateChangeHandlerFunc func(featureChange FeatureChange) + +// FeatureGateAccess is used to get a list of enabled and disabled featuregates. +// Create a new instance using NewFeatureGateAccess. +// To create one for unit testing, use NewHardcodedFeatureGateAccess. +type FeatureGateAccess interface { + // SetChangeHandler can only be called before Run. + // The default change handler will exit 0 when the set of featuregates changes. + // That is usually the easiest and simplest thing for an *operator* to do. + // This also discourages direct operand reading since all operands restarting simultaneously is bad. 
+	// This function allows changing that default behavior to something else (perhaps a channel notification for
+	// all impacted controllers in an operator).
+	// I doubt this will be worth the effort in the majority of cases.
+	SetChangeHandler(featureGateChangeHandlerFn FeatureGateChangeHandlerFunc)
+
+	// Run starts a go func that continuously watches the set of featuregates enabled in the cluster.
+	Run(ctx context.Context)
+	// InitialFeatureGatesObserved returns a channel that is closed once the featuregates have
+	// been observed. Once closed, the CurrentFeatureGates method will return the current set of
+	// featuregates and will never return a non-nil error.
+	InitialFeatureGatesObserved() <-chan struct{}
+	// CurrentFeatureGates returns the list of enabled and disabled featuregates.
+	// It returns an error if the current set of featuregates is not known.
+	CurrentFeatureGates() (FeatureGate, error)
+	// AreInitialFeatureGatesObserved returns true if the initial featuregates have been observed.
+	AreInitialFeatureGatesObserved() bool
+}
+
+type Features struct {
+	Enabled  []configv1.FeatureGateName
+	Disabled []configv1.FeatureGateName
+}
+
+type FeatureChange struct {
+	Previous *Features
+	New      Features
+}
+
+type defaultFeatureGateAccess struct {
+	desiredVersion              string
+	missingVersionMarker        string
+	clusterVersionLister        configlistersv1.ClusterVersionLister
+	featureGateLister           configlistersv1.FeatureGateLister
+	initialFeatureGatesObserved chan struct{}
+
+	featureGateChangeHandlerFn FeatureGateChangeHandlerFunc
+
+	lock            sync.Mutex
+	started         bool
+	initialFeatures Features
+	currentFeatures Features
+
+	queue         workqueue.RateLimitingInterface
+	eventRecorder events.Recorder
+}
+
+// NewFeatureGateAccess returns a controller that keeps the list of enabled/disabled featuregates up to date.
+// desiredVersion is the version of this operator that would be set on the clusteroperator.status.versions.
+// missingVersionMarker is the stub version provided by the operator. If that is also the desired version,
+// then either the desired version from the clusterVersion or the most recent history version will be used.
+// clusterVersionInformer is used when desiredVersion and missingVersionMarker are the same to derive the "best" version
+// of featuregates to use.
+// featureGateInformer is used to track changes to the featureGates once they are initially set.
+// By default, when the enabled/disabled list of featuregates changes, os.Exit is called. This behavior can be
+// overridden by calling SetChangeHandler to whatever you wish the behavior to be.
+// A common construct is:
+/* go
+featureGateAccessor := NewFeatureGateAccess(args)
+go featureGateAccessor.Run(ctx)
+
+select {
+case <-featureGateAccessor.InitialFeatureGatesObserved():
+	featureGates, _ := featureGateAccessor.CurrentFeatureGates()
+	klog.Infof("FeatureGates initialized: knownFeatureGates=%v", featureGates.KnownFeatures())
+case <-time.After(1 * time.Minute):
+	klog.Errorf("timed out waiting for FeatureGate detection")
+	return fmt.Errorf("timed out waiting for FeatureGate detection")
+}
+
+// whatever other initialization you have to do, at this point you have FeatureGates to drive your behavior.
+*/
+// That construct is simple. It is better to use the .spec.observedConfiguration construct common in library-go operators
+// to avoid gating your general startup on FeatureGate determination, but if you don't already have that mechanism,
+// this construct is easy.
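+//
+// To opt out of the exit-on-change default, install a handler before calling
+// Run (an illustrative sketch; the channel is hypothetical):
+//
+//	changed := make(chan struct{}, 1)
+//	featureGateAccessor.SetChangeHandler(func(FeatureChange) {
+//		select {
+//		case changed <- struct{}{}:
+//		default:
+//		}
+//	})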
+func NewFeatureGateAccess( + desiredVersion, missingVersionMarker string, + clusterVersionInformer v1.ClusterVersionInformer, + featureGateInformer v1.FeatureGateInformer, + eventRecorder events.Recorder) FeatureGateAccess { + c := &defaultFeatureGateAccess{ + desiredVersion: desiredVersion, + missingVersionMarker: missingVersionMarker, + clusterVersionLister: clusterVersionInformer.Lister(), + featureGateLister: featureGateInformer.Lister(), + initialFeatureGatesObserved: make(chan struct{}), + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "feature-gate-detector"), + eventRecorder: eventRecorder, + } + c.SetChangeHandler(ForceExit) + + // we aren't expecting many + clusterVersionInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + c.queue.Add("cluster") + }, + UpdateFunc: func(old, cur interface{}) { + c.queue.Add("cluster") + }, + DeleteFunc: func(uncast interface{}) { + c.queue.Add("cluster") + }, + }) + featureGateInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + c.queue.Add("cluster") + }, + UpdateFunc: func(old, cur interface{}) { + c.queue.Add("cluster") + }, + DeleteFunc: func(uncast interface{}) { + c.queue.Add("cluster") + }, + }) + + return c +} + +func ForceExit(featureChange FeatureChange) { + if featureChange.Previous != nil { + os.Exit(0) + } +} + +func (c *defaultFeatureGateAccess) SetChangeHandler(featureGateChangeHandlerFn FeatureGateChangeHandlerFunc) { + c.lock.Lock() + defer c.lock.Unlock() + + if c.started { + panic("programmer error, cannot update the change handler after starting") + } + c.featureGateChangeHandlerFn = featureGateChangeHandlerFn +} + +func (c *defaultFeatureGateAccess) Run(ctx context.Context) { + defer utilruntime.HandleCrash() + defer c.queue.ShutDown() + + klog.Infof("Starting feature-gate-detector") + defer klog.Infof("Shutting down feature-gate-detector") + + go wait.UntilWithContext(ctx, c.runWorker, time.Second) + + <-ctx.Done() +} + +func (c *defaultFeatureGateAccess) syncHandler(ctx context.Context) error { + desiredVersion := c.desiredVersion + if c.missingVersionMarker == c.desiredVersion { + clusterVersion, err := c.clusterVersionLister.Get("version") + if apierrors.IsNotFound(err) { + return nil // we will be re-triggered when it is created + } + if err != nil { + return err + } + + desiredVersion = clusterVersion.Status.Desired.Version + if len(desiredVersion) == 0 && len(clusterVersion.Status.History) > 0 { + desiredVersion = clusterVersion.Status.History[0].Version + } + } + + featureGate, err := c.featureGateLister.Get("cluster") + if apierrors.IsNotFound(err) { + return nil // we will be re-triggered when it is created + } + if err != nil { + return err + } + + features, err := featuresFromFeatureGate(featureGate, desiredVersion) + if err != nil { + return fmt.Errorf("unable to determine features: %w", err) + } + + c.setFeatureGates(features) + + return nil +} + +func (c *defaultFeatureGateAccess) setFeatureGates(features Features) { + c.lock.Lock() + defer c.lock.Unlock() + + var previousFeatures *Features + if c.AreInitialFeatureGatesObserved() { + t := c.currentFeatures + previousFeatures = &t + } + + c.currentFeatures = features + + if !c.AreInitialFeatureGatesObserved() { + c.initialFeatures = features + close(c.initialFeatureGatesObserved) + c.eventRecorder.Eventf("FeatureGatesInitialized", "FeatureGates updated to %#v", c.currentFeatures) + } + + if previousFeatures == nil || 
!reflect.DeepEqual(*previousFeatures, c.currentFeatures) { + if previousFeatures != nil { + c.eventRecorder.Eventf("FeatureGatesModified", "FeatureGates updated to %#v", c.currentFeatures) + } + + c.featureGateChangeHandlerFn(FeatureChange{ + Previous: previousFeatures, + New: c.currentFeatures, + }) + } +} + +func (c *defaultFeatureGateAccess) InitialFeatureGatesObserved() <-chan struct{} { + return c.initialFeatureGatesObserved +} + +func (c *defaultFeatureGateAccess) AreInitialFeatureGatesObserved() bool { + select { + case <-c.InitialFeatureGatesObserved(): + return true + default: + return false + } +} + +func (c *defaultFeatureGateAccess) CurrentFeatureGates() (FeatureGate, error) { + c.lock.Lock() + defer c.lock.Unlock() + + if !c.AreInitialFeatureGatesObserved() { + return nil, fmt.Errorf("featureGates not yet observed") + } + retEnabled := make([]configv1.FeatureGateName, len(c.currentFeatures.Enabled)) + retDisabled := make([]configv1.FeatureGateName, len(c.currentFeatures.Disabled)) + copy(retEnabled, c.currentFeatures.Enabled) + copy(retDisabled, c.currentFeatures.Disabled) + + return NewFeatureGate(retEnabled, retDisabled), nil +} + +func (c *defaultFeatureGateAccess) runWorker(ctx context.Context) { + for c.processNextWorkItem(ctx) { + } +} + +func (c *defaultFeatureGateAccess) processNextWorkItem(ctx context.Context) bool { + dsKey, quit := c.queue.Get() + if quit { + return false + } + defer c.queue.Done(dsKey) + + err := c.syncHandler(ctx) + if err == nil { + c.queue.Forget(dsKey) + return true + } + + utilruntime.HandleError(fmt.Errorf("%v failed with : %v", dsKey, err)) + c.queue.AddRateLimited(dsKey) + + return true +} + +func featuresFromFeatureGate(featureGate *configv1.FeatureGate, desiredVersion string) (Features, error) { + found := false + features := Features{} + for _, featureGateValues := range featureGate.Status.FeatureGates { + if featureGateValues.Version != desiredVersion { + continue + } + found = true + for _, enabled := range featureGateValues.Enabled { + features.Enabled = append(features.Enabled, enabled.Name) + } + for _, disabled := range featureGateValues.Disabled { + features.Disabled = append(features.Disabled, disabled.Name) + } + break + } + + if !found { + return Features{}, fmt.Errorf("missing desired version %q in featuregates.config.openshift.io/cluster", desiredVersion) + } + + return features, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/configobserver/unstructured.go b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/unstructured.go new file mode 100644 index 000000000..27b92d0fa --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/configobserver/unstructured.go @@ -0,0 +1,45 @@ +package configobserver + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +// Pruned returns the unstructured filtered by the given paths, i.e. everything +// outside of them will be dropped. The returned data structure might overlap +// with the input, but the input is not mutated. In case of error for a path, +// that path is dropped. +func Pruned(obj map[string]interface{}, pths ...[]string) map[string]interface{} { + if obj == nil || len(pths) == 0 { + return obj + } + + ret := map[string]interface{}{} + if len(pths) == 1 { + x, found, err := unstructured.NestedFieldCopy(obj, pths[0]...) + if err != nil || !found { + return ret + } + unstructured.SetNestedField(ret, x, pths[0]...) 
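+		// The error returned by SetNestedField is deliberately dropped: per the
+		// contract above, a path that cannot be set is simply omitted.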
+ return ret + } + + for i, p := range pths { + x, found, err := unstructured.NestedFieldCopy(obj, p...) + if err != nil { + continue + } + if !found { + continue + } + if i < len(pths)-1 { + // this might be overwritten by a later path + x = runtime.DeepCopyJSONValue(x) + } + if err := unstructured.SetNestedField(ret, x, p...); err != nil { + continue + } + } + + return ret +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/OWNERS b/vendor/github.com/openshift/library-go/pkg/operator/events/OWNERS new file mode 100644 index 000000000..4f189b708 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/OWNERS @@ -0,0 +1,8 @@ +reviewers: + - mfojtik + - deads2k + - sttts +approvers: + - mfojtik + - deads2k + - sttts diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go new file mode 100644 index 000000000..f513a90f3 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder.go @@ -0,0 +1,238 @@ +package events + +import ( + "context" + "errors" + "fmt" + "os" + "time" + + "k8s.io/client-go/kubernetes" + "k8s.io/klog/v2" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" +) + +// Recorder is a simple event recording interface. +type Recorder interface { + Event(reason, message string) + Eventf(reason, messageFmt string, args ...interface{}) + Warning(reason, message string) + Warningf(reason, messageFmt string, args ...interface{}) + + // ForComponent allows changing the component name before sending the event to the sink. + // Using more unique component names will prevent the spam filter in the upstream event sink from dropping + // events. + ForComponent(componentName string) Recorder + + // WithComponentSuffix is similar to ForComponent except it suffixes the current component name instead of overriding it. + WithComponentSuffix(componentNameSuffix string) Recorder + + // WithContext allows setting a context for event create API calls. + WithContext(ctx context.Context) Recorder + + // ComponentName returns the current source component name for the event. + // This allows suffixing the original component name with a 'sub-component'. + ComponentName() string + + Shutdown() +} + +// podNameEnv is the name of the environment variable inside the container that specifies the name of the current pod. +// The pod name is then used to resolve the source/involved object for operator events. +const podNameEnv = "POD_NAME" + +// podNameEnvFunc allows overriding the way we get the environment variable value (for unit tests). +var podNameEnvFunc = func() string { + return os.Getenv(podNameEnv) +} + +// GetControllerReferenceForCurrentPod provides an object reference to a controller managing the pod/container where this process runs. +// The pod name must be provided via the POD_NAME environment variable. +// Even if this method returns an error, it always returns a valid reference to the namespace. It allows the callers to control the logging +// and decide whether to fail or accept the namespace.
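A minimal sketch of how the helper below is typically wired together with NewRecorder (defined later in this file); the component name, error handling, and package-internal naming here are illustrative assumptions, not part of the vendored code:

func newOperatorRecorder(ctx context.Context, client kubernetes.Interface, namespace string) Recorder {
	// Passing nil lets the helper discover the pod via POD_NAME and walk up its owners.
	ref, err := GetControllerReferenceForCurrentPod(ctx, client, namespace, nil)
	if err != nil {
		// Non-fatal by design: ref still points at the namespace, so events can be emitted.
		klog.Warningf("unable to resolve controller reference, falling back to namespace: %v", err)
	}
	return NewRecorder(client.CoreV1().Events(namespace), "my-operator", ref)
}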
+func GetControllerReferenceForCurrentPod(ctx context.Context, client kubernetes.Interface, targetNamespace string, reference *corev1.ObjectReference) (*corev1.ObjectReference, error) { + if reference == nil { + // Try to get the pod name via POD_NAME environment variable + reference := &corev1.ObjectReference{Kind: "Pod", Name: podNameEnvFunc(), Namespace: targetNamespace} + if len(reference.Name) != 0 { + return GetControllerReferenceForCurrentPod(ctx, client, targetNamespace, reference) + } + // If that fails, let's try to guess the pod by listing all pods in the namespace and using the first pod in the list + reference, err := guessControllerReferenceForNamespace(ctx, client.CoreV1().Pods(targetNamespace)) + if err != nil { + // If this fails, do not give up with an error but instead use the namespace as the controller reference for the pod + // NOTE: This is a last resort; if we see this often it might indicate something is wrong in the cluster. + // In some cases this might help with flakes. + return getControllerReferenceForNamespace(targetNamespace), err + } + return GetControllerReferenceForCurrentPod(ctx, client, targetNamespace, reference) + } + + switch reference.Kind { + case "Pod": + pod, err := client.CoreV1().Pods(reference.Namespace).Get(ctx, reference.Name, metav1.GetOptions{}) + if err != nil { + return getControllerReferenceForNamespace(reference.Namespace), err + } + if podController := metav1.GetControllerOf(pod); podController != nil { + return GetControllerReferenceForCurrentPod(ctx, client, targetNamespace, makeObjectReference(podController, targetNamespace)) + } + // This is a bare pod without any ownerReference + return makeObjectReference(&metav1.OwnerReference{Kind: "Pod", Name: pod.Name, UID: pod.UID, APIVersion: "v1"}, pod.Namespace), nil + case "ReplicaSet": + rs, err := client.AppsV1().ReplicaSets(reference.Namespace).Get(ctx, reference.Name, metav1.GetOptions{}) + if err != nil { + return getControllerReferenceForNamespace(reference.Namespace), err + } + if rsController := metav1.GetControllerOf(rs); rsController != nil { + return GetControllerReferenceForCurrentPod(ctx, client, targetNamespace, makeObjectReference(rsController, targetNamespace)) + } + // This is a replicaSet without any ownerReference + return reference, nil + default: + return reference, nil + } +} + +// getControllerReferenceForNamespace returns an object reference to the given namespace. +func getControllerReferenceForNamespace(targetNamespace string) *corev1.ObjectReference { + return &corev1.ObjectReference{ + Kind: "Namespace", + Namespace: targetNamespace, + Name: targetNamespace, + APIVersion: "v1", + } +} + +// makeObjectReference makes an object reference from an ownerReference and a target namespace +func makeObjectReference(owner *metav1.OwnerReference, targetNamespace string) *corev1.ObjectReference { + return &corev1.ObjectReference{ + Kind: owner.Kind, + Namespace: targetNamespace, + Name: owner.Name, + UID: owner.UID, + APIVersion: owner.APIVersion, + } +} + +// guessControllerReferenceForNamespace tries to guess what resource to reference.
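Stepping back to the FeatureGateAccess accessor vendored earlier in this patch: a typical consumer starts the accessor and blocks until the initial feature gates are observed. A minimal sketch, assuming the accessor was constructed with real informers elsewhere; "featuregates" is a stand-in for the vendored package name:

func waitForInitialFeatureGates(ctx context.Context, access featuregates.FeatureGateAccess) (featuregates.FeatureGate, error) {
	go access.Run(ctx)
	select {
	case <-access.InitialFeatureGatesObserved():
		// Safe now: CurrentFeatureGates only errors before the initial observation.
		return access.CurrentFeatureGates()
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}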
+func guessControllerReferenceForNamespace(ctx context.Context, client corev1client.PodInterface) (*corev1.ObjectReference, error) { + pods, err := client.List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + if len(pods.Items) == 0 { + return nil, fmt.Errorf("unable to set up event recorder as %q env variable is not set and there are no pods", podNameEnv) + } + + for _, pod := range pods.Items { + ownerRef := metav1.GetControllerOf(&pod) + if ownerRef == nil { + continue + } + return &corev1.ObjectReference{ + Kind: ownerRef.Kind, + Namespace: pod.Namespace, + Name: ownerRef.Name, + UID: ownerRef.UID, + APIVersion: ownerRef.APIVersion, + }, nil + } + return nil, errors.New("can't guess controller ref") +} + +// NewRecorder returns a new event recorder. +func NewRecorder(client corev1client.EventInterface, sourceComponentName string, involvedObjectRef *corev1.ObjectReference) Recorder { + return &recorder{ + eventClient: client, + involvedObjectRef: involvedObjectRef, + sourceComponent: sourceComponentName, + } +} + +// recorder is an implementation of the Recorder interface. +type recorder struct { + eventClient corev1client.EventInterface + involvedObjectRef *corev1.ObjectReference + sourceComponent string + + // TODO: This is not the right way to pass the context, but there is no other way without breaking the event interface + ctx context.Context +} + +func (r *recorder) ComponentName() string { + return r.sourceComponent +} + +func (r *recorder) Shutdown() {} + +func (r *recorder) ForComponent(componentName string) Recorder { + newRecorderForComponent := *r + newRecorderForComponent.sourceComponent = componentName + return &newRecorderForComponent +} + +func (r *recorder) WithContext(ctx context.Context) Recorder { + r.ctx = ctx + return r +} + +func (r *recorder) WithComponentSuffix(suffix string) Recorder { + return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix)) +} + +// Eventf emits the normal type event and allows formatting of the message. +func (r *recorder) Eventf(reason, messageFmt string, args ...interface{}) { + r.Event(reason, fmt.Sprintf(messageFmt, args...)) +} + +// Warningf emits the warning type event and allows formatting of the message. +func (r *recorder) Warningf(reason, messageFmt string, args ...interface{}) { + r.Warning(reason, fmt.Sprintf(messageFmt, args...)) +} + +// Event emits the normal type event. +func (r *recorder) Event(reason, message string) { + event := makeEvent(r.involvedObjectRef, r.sourceComponent, corev1.EventTypeNormal, reason, message) + ctx := context.Background() + if r.ctx != nil { + ctx = r.ctx + } + if _, err := r.eventClient.Create(ctx, event, metav1.CreateOptions{}); err != nil { + klog.Warningf("Error creating event %+v: %v", event, err) + } +} + +// Warning emits the warning type event.
+func (r *recorder) Warning(reason, message string) { + event := makeEvent(r.involvedObjectRef, r.sourceComponent, corev1.EventTypeWarning, reason, message) + ctx := context.Background() + if r.ctx != nil { + ctx = r.ctx + } + if _, err := r.eventClient.Create(ctx, event, metav1.CreateOptions{}); err != nil { + klog.Warningf("Error creating event %+v: %v", event, err) + } +} + +func makeEvent(involvedObjRef *corev1.ObjectReference, sourceComponent string, eventType, reason, message string) *corev1.Event { + currentTime := metav1.Time{Time: time.Now()} + event := &corev1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%v.%x", involvedObjRef.Name, currentTime.UnixNano()), + Namespace: involvedObjRef.Namespace, + }, + InvolvedObject: *involvedObjRef, + Reason: reason, + Message: message, + Type: eventType, + Count: 1, + FirstTimestamp: currentTime, + LastTimestamp: currentTime, + } + event.Source.Component = sourceComponent + return event +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go new file mode 100644 index 000000000..75efe3e19 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_in_memory.go @@ -0,0 +1,86 @@ +package events + +import ( + "context" + "fmt" + "sync" + + corev1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" +) + +type inMemoryEventRecorder struct { + events []*corev1.Event + source string + ctx context.Context + sync.Mutex +} + +// inMemoryDummyObjectReference is used for fake events. +var inMemoryDummyObjectReference = corev1.ObjectReference{ + Kind: "Pod", + Namespace: "dummy", + Name: "dummy", + APIVersion: "v1", +} + +type InMemoryRecorder interface { + Events() []*corev1.Event + Recorder +} + +// NewInMemoryRecorder provides an event recorder that stores all recorded events in memory and allows replaying them using the Events() method. +// This recorder should only be used in unit tests.
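A unit-test sketch of the in-memory recorder, assuming NewInMemoryRecorder as defined just below; the test name and assertion are illustrative:

func TestOperandSyncEmitsEvent(t *testing.T) {
	recorder := NewInMemoryRecorder("test")
	// Code under test takes the Recorder interface and emits events.
	recorder.Eventf("OperandUpdated", "updated %d objects", 3)
	if got := len(recorder.Events()); got != 1 {
		t.Fatalf("expected 1 recorded event, got %d", got)
	}
}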
+func NewInMemoryRecorder(sourceComponent string) InMemoryRecorder { + return &inMemoryEventRecorder{events: []*corev1.Event{}, source: sourceComponent} +} + +func (r *inMemoryEventRecorder) ComponentName() string { + return r.source +} + +func (r *inMemoryEventRecorder) Shutdown() {} + +func (r *inMemoryEventRecorder) ForComponent(component string) Recorder { + r.Lock() + defer r.Unlock() + r.source = component + return r +} + +func (r *inMemoryEventRecorder) WithContext(ctx context.Context) Recorder { + r.ctx = ctx + return r +} + +func (r *inMemoryEventRecorder) WithComponentSuffix(suffix string) Recorder { + return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix)) +} + +// Events returns list of recorded events +func (r *inMemoryEventRecorder) Events() []*corev1.Event { + return r.events +} + +func (r *inMemoryEventRecorder) Event(reason, message string) { + r.Lock() + defer r.Unlock() + event := makeEvent(&inMemoryDummyObjectReference, r.source, corev1.EventTypeNormal, reason, message) + r.events = append(r.events, event) +} + +func (r *inMemoryEventRecorder) Eventf(reason, messageFmt string, args ...interface{}) { + r.Event(reason, fmt.Sprintf(messageFmt, args...)) +} + +func (r *inMemoryEventRecorder) Warning(reason, message string) { + r.Lock() + defer r.Unlock() + event := makeEvent(&inMemoryDummyObjectReference, r.source, corev1.EventTypeWarning, reason, message) + klog.Info(event.String()) + r.events = append(r.events, event) +} + +func (r *inMemoryEventRecorder) Warningf(reason, messageFmt string, args ...interface{}) { + r.Warning(reason, fmt.Sprintf(messageFmt, args...)) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go new file mode 100644 index 000000000..90639f2d9 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_logging.go @@ -0,0 +1,58 @@ +package events + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/klog/v2" +) + +type LoggingEventRecorder struct { + component string + ctx context.Context +} + +func (r *LoggingEventRecorder) WithContext(ctx context.Context) Recorder { + r.ctx = ctx + return r +} + +// NewLoggingEventRecorder provides event recorder that will log all recorded events via klog. 
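The klog-backed recorder below needs no API server at all, which arguably makes it a fit for bootstrap or CLI code paths; a two-line sketch with assumed names:

recorder := NewLoggingEventRecorder("bootstrap")
recorder.Warning("MissingConfig", "no configuration found, using defaults") // printed via klog; no Event objects are created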
+func NewLoggingEventRecorder(component string) Recorder { + return &LoggingEventRecorder{component: component} +} + +func (r *LoggingEventRecorder) ComponentName() string { + return r.component +} + +func (r *LoggingEventRecorder) ForComponent(component string) Recorder { + newRecorder := *r + newRecorder.component = component + return &newRecorder +} + +func (r *LoggingEventRecorder) Shutdown() {} + +func (r *LoggingEventRecorder) WithComponentSuffix(suffix string) Recorder { + return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix)) +} + +func (r *LoggingEventRecorder) Event(reason, message string) { + event := makeEvent(&inMemoryDummyObjectReference, "", corev1.EventTypeNormal, reason, message) + klog.Info(event.String()) +} + +func (r *LoggingEventRecorder) Eventf(reason, messageFmt string, args ...interface{}) { + r.Event(reason, fmt.Sprintf(messageFmt, args...)) +} + +func (r *LoggingEventRecorder) Warning(reason, message string) { + event := makeEvent(&inMemoryDummyObjectReference, "", corev1.EventTypeWarning, reason, message) + klog.Warning(event.String()) +} + +func (r *LoggingEventRecorder) Warningf(reason, messageFmt string, args ...interface{}) { + r.Warning(reason, fmt.Sprintf(messageFmt, args...)) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go new file mode 100644 index 000000000..0e41949a7 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/events/recorder_upstream.go @@ -0,0 +1,173 @@ +package events + +import ( + "context" + "fmt" + "strings" + "sync" + + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes/scheme" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/record" + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" + "k8s.io/klog/v2" +) + +// NewKubeRecorderWithOptions returns a new event recorder with tweaked correlator options. +func NewKubeRecorderWithOptions(client corev1client.EventInterface, options record.CorrelatorOptions, sourceComponentName string, involvedObjectRef *corev1.ObjectReference) Recorder { + return (&upstreamRecorder{ + client: client, + component: sourceComponentName, + involvedObjectRef: involvedObjectRef, + options: options, + fallbackRecorder: NewRecorder(client, sourceComponentName, involvedObjectRef), + }).ForComponent(sourceComponentName) +} + +// NewKubeRecorder returns a new event recorder with default correlator options. +func NewKubeRecorder(client corev1client.EventInterface, sourceComponentName string, involvedObjectRef *corev1.ObjectReference) Recorder { + return NewKubeRecorderWithOptions(client, record.CorrelatorOptions{}, sourceComponentName, involvedObjectRef) +} + +// upstreamRecorder is an implementation of the Recorder interface. +type upstreamRecorder struct { + client corev1client.EventInterface + clientCtx context.Context + component string + broadcaster record.EventBroadcaster + eventRecorder record.EventRecorder + involvedObjectRef *corev1.ObjectReference + options record.CorrelatorOptions + + // shuttingDown indicates that the broadcaster for this recorder is being shut down + shuttingDown bool + shutdownMutex sync.RWMutex + + // fallbackRecorder is used when the kube recorder is shutting down; + // in that case we create the events directly.
+ fallbackRecorder Recorder +} + +func (r *upstreamRecorder) WithContext(ctx context.Context) Recorder { + r.clientCtx = ctx + return r +} + +// RecommendedClusterSingletonCorrelatorOptions provides recommended event correlator options for components that produce +// many events (like operators). +func RecommendedClusterSingletonCorrelatorOptions() record.CorrelatorOptions { + return record.CorrelatorOptions{ + BurstSize: 60, // default: 25 (change allows a single source to send 60 events about an object per minute) + QPS: 1. / 1., // default: 1/300 (change allows a refill rate of 1 new event every 1s) + KeyFunc: func(event *corev1.Event) (aggregateKey string, localKey string) { + return strings.Join([]string{ + event.Source.Component, + event.Source.Host, + event.InvolvedObject.Kind, + event.InvolvedObject.Namespace, + event.InvolvedObject.Name, + string(event.InvolvedObject.UID), + event.InvolvedObject.APIVersion, + event.Type, + event.Reason, + // By default, KeyFunc does not use the message for aggregation; including it here ensures events with different messages but the same reason are not dropped as "similar events". + event.Message, + }, ""), event.Message + }, + } +} + +var eventsCounterMetric = metrics.NewCounterVec(&metrics.CounterOpts{ + Subsystem: "event_recorder", + Name: "total_events_count", + Help: "Total count of events processed by this event recorder per involved object", + StabilityLevel: metrics.ALPHA, +}, []string{"severity"}) + +func init() { + (&sync.Once{}).Do(func() { + legacyregistry.MustRegister(eventsCounterMetric) + }) +} + +func (r *upstreamRecorder) ForComponent(componentName string) Recorder { + newRecorderForComponent := upstreamRecorder{ + client: r.client, + fallbackRecorder: r.fallbackRecorder.WithComponentSuffix(componentName), + options: r.options, + involvedObjectRef: r.involvedObjectRef, + shuttingDown: r.shuttingDown, + } + + // tweak the event correlator, so we don't lose important events. + broadcaster := record.NewBroadcasterWithCorrelatorOptions(r.options) + broadcaster.StartLogging(klog.Infof) + broadcaster.StartRecordingToSink(&corev1client.EventSinkImpl{Interface: newRecorderForComponent.client}) + + newRecorderForComponent.eventRecorder = broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: componentName}) + newRecorderForComponent.broadcaster = broadcaster + newRecorderForComponent.component = componentName + + return &newRecorderForComponent +} + +func (r *upstreamRecorder) Shutdown() { + r.shutdownMutex.Lock() + r.shuttingDown = true + r.shutdownMutex.Unlock() + // Wait for the broadcaster to flush events (this is blocking) + // TODO: There is still a race condition upstream that might cause panic() on events recorded after the shutdown + // is called, as the event recording is non-blocking (goroutine based). + r.broadcaster.Shutdown() +} + +func (r *upstreamRecorder) WithComponentSuffix(suffix string) Recorder { + return r.ForComponent(fmt.Sprintf("%s-%s", r.ComponentName(), suffix)) +} + +func (r *upstreamRecorder) ComponentName() string { + return r.component +} + +// Eventf emits the normal type event and allows formatting of the message. +func (r *upstreamRecorder) Eventf(reason, messageFmt string, args ...interface{}) { + r.Event(reason, fmt.Sprintf(messageFmt, args...)) +} + +// Warningf emits the warning type event and allows formatting of the message.
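Putting the constructors above together, a sketch of building a correlated recorder for a chatty operator; the client wiring, namespace, and controllerRef are assumptions:

rec := NewKubeRecorderWithOptions(
	kubeClient.CoreV1().Events(operatorNamespace),
	RecommendedClusterSingletonCorrelatorOptions(),
	"my-operator",
	controllerRef,
)
defer rec.Shutdown() // blocks until the broadcaster has flushed pending events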
+func (r *upstreamRecorder) Warningf(reason, messageFmt string, args ...interface{}) { + r.Warning(reason, fmt.Sprintf(messageFmt, args...)) +} + +func (r *upstreamRecorder) incrementEventsCounter(severity string) { + if r.involvedObjectRef == nil { + return + } + eventsCounterMetric.WithLabelValues(severity).Inc() +} + +// Event emits the normal type event. +func (r *upstreamRecorder) Event(reason, message string) { + r.shutdownMutex.RLock() + defer r.shutdownMutex.RUnlock() + defer r.incrementEventsCounter(corev1.EventTypeNormal) + if r.shuttingDown { + r.fallbackRecorder.Event(reason, message) + return + } + r.eventRecorder.Event(r.involvedObjectRef, corev1.EventTypeNormal, reason, message) +} + +// Warning emits the warning type event. +func (r *upstreamRecorder) Warning(reason, message string) { + r.shutdownMutex.RLock() + defer r.shutdownMutex.RUnlock() + defer r.incrementEventsCounter(corev1.EventTypeWarning) + if r.shuttingDown { + r.fallbackRecorder.Warning(reason, message) + return + } + r.eventRecorder.Event(r.involvedObjectRef, corev1.EventTypeWarning, reason, message) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/management/management_state.go b/vendor/github.com/openshift/library-go/pkg/operator/management/management_state.go new file mode 100644 index 000000000..294770f3e --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/management/management_state.go @@ -0,0 +1,77 @@ +package management + +import ( + v1 "github.com/openshift/api/operator/v1" +) + +var ( + allowOperatorUnmanagedState = true + allowOperatorRemovedState = true +) + +// SetOperatorAlwaysManaged is a one-time choice made when an operator wants to opt out of supporting the "unmanaged" state. +// This is the case for control plane operators, or operators that are required to always run because otherwise the cluster will +// get into an unstable state or critical components will stop working. +func SetOperatorAlwaysManaged() { + allowOperatorUnmanagedState = false +} + +// SetOperatorUnmanageable is a one-time choice made when an operator wants to support the "unmanaged" state. +// This is the default setting, provided here mostly for unit tests. +func SetOperatorUnmanageable() { + allowOperatorUnmanagedState = true +} + +// SetOperatorNotRemovable is a one-time choice the operator author can make to indicate that the operator does not support +// removal of its operand. This makes sense for operators like kube-apiserver, where removing the operand will lead to a +// bricked, non-automatically-recoverable state. +func SetOperatorNotRemovable() { + allowOperatorRemovedState = false +} + +// SetOperatorRemovable is a one-time choice the operator author can make to indicate that the operator supports +// removal of its operand. +// This is the default setting, provided here mostly for unit tests. +func SetOperatorRemovable() { + allowOperatorRemovedState = true +} + +// IsOperatorAlwaysManaged means the operator can't be set to the unmanaged state. +func IsOperatorAlwaysManaged() bool { + return !allowOperatorUnmanagedState +} + +// IsOperatorNotRemovable means the operator can't be set to the removed state. +func IsOperatorNotRemovable() bool { + return !allowOperatorRemovedState +} + +// IsOperatorRemovable means the operator can be set to the removed state.
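An operator that must never be unmanaged or removed would make these one-time choices at startup, and its sync loop can then consult the query helpers (IsOperatorManaged appears just below). A sketch with assumed names; operatorv1 is github.com/openshift/api/operator/v1 and management is the package defined here:

func init() {
	// Opt out of "unmanaged" and "removed" before any controller starts.
	management.SetOperatorAlwaysManaged()
	management.SetOperatorNotRemovable()
}

func sync(spec *operatorv1.OperatorSpec) error {
	if !management.IsOperatorManaged(spec.ManagementState) {
		return nil // nothing to do while unmanaged or removed
	}
	// ... reconcile the operand ...
	return nil
}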
+func IsOperatorRemovable() bool { + return allowOperatorRemovedState +} + +func IsOperatorUnknownState(state v1.ManagementState) bool { + switch state { + case v1.Managed, v1.Removed, v1.Unmanaged: + return false + default: + return true + } +} + +// IsOperatorManaged indicates whether the operator management state allows the control loop to proceed and manage the operand. +func IsOperatorManaged(state v1.ManagementState) bool { + if IsOperatorAlwaysManaged() || IsOperatorNotRemovable() { + return true + } + switch state { + case v1.Managed: + return true + case v1.Removed: + return false + case v1.Unmanaged: + return false + } + return true +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/admissionregistration.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/admissionregistration.go new file mode 100644 index 000000000..fafa39c40 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/admissionregistration.go @@ -0,0 +1,166 @@ +package resourceapply + +import ( + "context" + "fmt" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + admissionregistrationclientv1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1" + "k8s.io/klog/v2" +) + +// ApplyMutatingWebhookConfigurationImproved ensures the form of the specified +// mutatingwebhookconfiguration is present in the API. If it does not exist, +// it will be created. If it does exist, the metadata of the required +// mutatingwebhookconfiguration will be merged with the existing mutatingwebhookconfiguration +// and an update performed if the mutatingwebhookconfiguration spec and metadata differ from +// the previously required spec and metadata based on generation change. 
+func ApplyMutatingWebhookConfigurationImproved(ctx context.Context, client admissionregistrationclientv1.MutatingWebhookConfigurationsGetter, recorder events.Recorder, + requiredOriginal *admissionregistrationv1.MutatingWebhookConfiguration, cache ResourceCache) (*admissionregistrationv1.MutatingWebhookConfiguration, bool, error) { + + if requiredOriginal == nil { + return nil, false, fmt.Errorf("Unexpected nil instead of an object") + } + + existing, err := client.MutatingWebhookConfigurations().Get(ctx, requiredOriginal.GetName(), metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + required := requiredOriginal.DeepCopy() + actual, err := client.MutatingWebhookConfigurations().Create( + ctx, resourcemerge.WithCleanLabelsAndAnnotations(required).(*admissionregistrationv1.MutatingWebhookConfiguration), metav1.CreateOptions{}) + reportCreateEvent(recorder, required, err) + if err != nil { + return nil, false, err + } + // need to store the original so that the early comparison of hashes is done based on the original, not a mutated copy + cache.UpdateCachedResourceMetadata(requiredOriginal, actual) + return actual, true, nil + } else if err != nil { + return nil, false, err + } + + if cache.SafeToSkipApply(requiredOriginal, existing) { + return existing, false, nil + } + + required := requiredOriginal.DeepCopy() + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + copyMutatingWebhookCABundle(existing, required) + webhooksEquivalent := equality.Semantic.DeepEqual(existingCopy.Webhooks, required.Webhooks) + if webhooksEquivalent && !*modified { + // need to store the original so that the early comparison of hashes is done based on the original, not a mutated copy + cache.UpdateCachedResourceMetadata(requiredOriginal, existingCopy) + return existingCopy, false, nil + } + // at this point we know that we're going to perform a write. We're just trying to get the object correct + toWrite := existingCopy // shallow copy so the code reads easier + toWrite.Webhooks = required.Webhooks + + klog.V(4).Infof("MutatingWebhookConfiguration %q changes: %v", required.GetNamespace()+"/"+required.GetName(), JSONPatchNoError(existing, toWrite)) + + actual, err := client.MutatingWebhookConfigurations().Update(ctx, toWrite, metav1.UpdateOptions{}) + reportUpdateEvent(recorder, required, err) + if err != nil { + return nil, false, err + } + // need to store the original so that the early comparison of hashes is done based on the original, not a mutated copy + cache.UpdateCachedResourceMetadata(requiredOriginal, actual) + return actual, true, nil +} + +// copyMutatingWebhookCABundle populates webhooks[].clientConfig.caBundle fields from existing resource if it was set before +// and is not set in present. This provides upgrade compatibility with service-ca-bundle operator. +func copyMutatingWebhookCABundle(from, to *admissionregistrationv1.MutatingWebhookConfiguration) { + fromMap := make(map[string]admissionregistrationv1.MutatingWebhook, len(from.Webhooks)) + for _, webhook := range from.Webhooks { + fromMap[webhook.Name] = webhook + } + + for i, wh := range to.Webhooks { + if existing, ok := fromMap[wh.Name]; ok && wh.ClientConfig.CABundle == nil { + to.Webhooks[i].ClientConfig.CABundle = existing.ClientConfig.CABundle + } + } +} + +// ApplyValidatingWebhookConfigurationImproved ensures the form of the specified +// validatingwebhookconfiguration is present in the API. 
If it does not exist, +// it will be created. If it does exist, the metadata of the required +// validatingwebhookconfiguration will be merged with the existing validatingwebhookconfiguration +// and an update performed if the validatingwebhookconfiguration spec and metadata differ from +// the previously required spec and metadata based on generation change. +func ApplyValidatingWebhookConfigurationImproved(ctx context.Context, client admissionregistrationclientv1.ValidatingWebhookConfigurationsGetter, recorder events.Recorder, + requiredOriginal *admissionregistrationv1.ValidatingWebhookConfiguration, cache ResourceCache) (*admissionregistrationv1.ValidatingWebhookConfiguration, bool, error) { + if requiredOriginal == nil { + return nil, false, fmt.Errorf("Unexpected nil instead of an object") + } + + existing, err := client.ValidatingWebhookConfigurations().Get(ctx, requiredOriginal.GetName(), metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + required := requiredOriginal.DeepCopy() + actual, err := client.ValidatingWebhookConfigurations().Create( + ctx, resourcemerge.WithCleanLabelsAndAnnotations(required).(*admissionregistrationv1.ValidatingWebhookConfiguration), metav1.CreateOptions{}) + reportCreateEvent(recorder, required, err) + if err != nil { + return nil, false, err + } + // need to store the original so that the early comparison of hashes is done based on the original, not a mutated copy + cache.UpdateCachedResourceMetadata(requiredOriginal, actual) + return actual, true, nil + } else if err != nil { + return nil, false, err + } + + if cache.SafeToSkipApply(requiredOriginal, existing) { + return existing, false, nil + } + + required := requiredOriginal.DeepCopy() + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + copyValidatingWebhookCABundle(existing, required) + webhooksEquivalent := equality.Semantic.DeepEqual(existingCopy.Webhooks, required.Webhooks) + if webhooksEquivalent && !*modified { + // need to store the original so that the early comparison of hashes is done based on the original, not a mutated copy + cache.UpdateCachedResourceMetadata(requiredOriginal, existingCopy) + return existingCopy, false, nil + } + // at this point we know that we're going to perform a write. We're just trying to get the object correct + toWrite := existingCopy // shallow copy so the code reads easier + toWrite.Webhooks = required.Webhooks + + klog.V(4).Infof("ValidatingWebhookConfiguration %q changes: %v", required.GetNamespace()+"/"+required.GetName(), JSONPatchNoError(existing, toWrite)) + + actual, err := client.ValidatingWebhookConfigurations().Update(ctx, toWrite, metav1.UpdateOptions{}) + reportUpdateEvent(recorder, required, err) + if err != nil { + return nil, false, err + } + // need to store the original so that the early comparison of hashes is done based on the original, not a mutated copy + cache.UpdateCachedResourceMetadata(requiredOriginal, actual) + return actual, true, nil +} + +// copyValidatingWebhookCABundle populates webhooks[].clientConfig.caBundle fields from existing resource if it was set before +// and is not set in present. This provides upgrade compatibility with service-ca-bundle operator. 
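Both webhook appliers above share the same call shape; a sketch of driving one from a sync loop, assuming a ResourceCache constructed elsewhere in this package (e.g. via NewResourceCache) and an already-decoded manifest:

cache := resourceapply.NewResourceCache() // assumed package-level constructor
_, changed, err := resourceapply.ApplyValidatingWebhookConfigurationImproved(
	ctx, kubeClient.AdmissionregistrationV1(), recorder, requiredWebhookConfig, cache)
if err != nil {
	return err
}
if changed {
	klog.V(2).Info("validating webhook configuration updated")
}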
+func copyValidatingWebhookCABundle(from, to *admissionregistrationv1.ValidatingWebhookConfiguration) { + fromMap := make(map[string]admissionregistrationv1.ValidatingWebhook, len(from.Webhooks)) + for _, webhook := range from.Webhooks { + fromMap[webhook.Name] = webhook + } + + for i, wh := range to.Webhooks { + if existing, ok := fromMap[wh.Name]; ok && wh.ClientConfig.CABundle == nil { + to.Webhooks[i].ClientConfig.CABundle = existing.ClientConfig.CABundle + } + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiextensions.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiextensions.go new file mode 100644 index 000000000..6cd94f64d --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiextensions.go @@ -0,0 +1,56 @@ +package resourceapply + +import ( + "context" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextclientv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" +) + +// ApplyCustomResourceDefinitionV1 applies the required CustomResourceDefinition to the cluster. +func ApplyCustomResourceDefinitionV1(ctx context.Context, client apiextclientv1.CustomResourceDefinitionsGetter, recorder events.Recorder, required *apiextensionsv1.CustomResourceDefinition) (*apiextensionsv1.CustomResourceDefinition, bool, error) { + existing, err := client.CustomResourceDefinitions().Get(ctx, required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + requiredCopy := required.DeepCopy() + actual, err := client.CustomResourceDefinitions().Create( + ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*apiextensionsv1.CustomResourceDefinition), metav1.CreateOptions{}) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + resourcemerge.EnsureCustomResourceDefinitionV1(modified, existingCopy, *required) + if !*modified { + return existing, false, nil + } + + if klog.V(4).Enabled() { + klog.Infof("CustomResourceDefinition %q changes: %s", existing.Name, JSONPatchNoError(existing, existingCopy)) + } + + actual, err := client.CustomResourceDefinitions().Update(ctx, existingCopy, metav1.UpdateOptions{}) + reportUpdateEvent(recorder, required, err) + + return actual, true, err +} + +func DeleteCustomResourceDefinitionV1(ctx context.Context, client apiextclientv1.CustomResourceDefinitionsGetter, recorder events.Recorder, required *apiextensionsv1.CustomResourceDefinition) (*apiextensionsv1.CustomResourceDefinition, bool, error) { + err := client.CustomResourceDefinitions().Delete(ctx, required.Name, metav1.DeleteOptions{}) + if err != nil && apierrors.IsNotFound(err) { + return nil, false, nil + } + if err != nil { + return nil, false, err + } + reportDeleteEvent(recorder, required, err) + return nil, true, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiregistration.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiregistration.go new file mode 100644 index 000000000..b09bf46f2 --- /dev/null +++ 
b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apiregistration.go @@ -0,0 +1,51 @@ +package resourceapply + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" + apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" + apiregistrationv1client "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" +) + +// ApplyAPIService merges objectmeta and requires apiservice coordinates. It does not touch CA bundles, which should be managed via service CA controller. +func ApplyAPIService(ctx context.Context, client apiregistrationv1client.APIServicesGetter, recorder events.Recorder, required *apiregistrationv1.APIService) (*apiregistrationv1.APIService, bool, error) { + existing, err := client.APIServices().Get(ctx, required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + requiredCopy := required.DeepCopy() + actual, err := client.APIServices().Create( + ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*apiregistrationv1.APIService), metav1.CreateOptions{}) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + serviceSame := equality.Semantic.DeepEqual(existingCopy.Spec.Service, required.Spec.Service) + prioritySame := existingCopy.Spec.VersionPriority == required.Spec.VersionPriority && existingCopy.Spec.GroupPriorityMinimum == required.Spec.GroupPriorityMinimum + insecureSame := existingCopy.Spec.InsecureSkipTLSVerify == required.Spec.InsecureSkipTLSVerify + // there was no change to metadata, the service and priorities were right + if !*modified && serviceSame && prioritySame && insecureSame { + return existingCopy, false, nil + } + + existingCopy.Spec = required.Spec + + if klog.V(4).Enabled() { + klog.Infof("APIService %q changes: %s", existing.Name, JSONPatchNoError(existing, existingCopy)) + } + actual, err := client.APIServices().Update(ctx, existingCopy, metav1.UpdateOptions{}) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apps.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apps.go new file mode 100644 index 000000000..b2a645e5d --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/apps.go @@ -0,0 +1,246 @@ +package resourceapply + +import ( + "context" + "crypto/sha256" + "encoding/json" + "fmt" + + "k8s.io/klog/v2" + + appsv1 "k8s.io/api/apps/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/uuid" + appsclientv1 "k8s.io/client-go/kubernetes/typed/apps/v1" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" +) + +// The Apply methods in this file ensure that a resource is created or updated to match +// the form provided by the caller. +// +// If the resource does not yet exist, it will be created. 
+// +// If the resource exists, the metadata of the required resource will be merged with the +// existing resource and an update will be performed if the spec and metadata differ between +// the required and existing resources. To be reliable, the input of the required spec from +// the operator should be stable. It does not need to set all fields, since some fields are +// defaulted server-side. Detection of spec drift from intent by other actors is determined +// by generation, not by spec comparison. +// +// To ensure an update in response to state external to the resource spec, the caller should +// set an annotation representing that external state e.g. +// +// `myoperator.openshift.io/config-resource-version: ` +// +// An update will be performed if: +// +// - The required resource metadata differs from that of the existing resource. +// - The difference will be detected by comparing the name, namespace, labels and +// annotations of the 2 resources. +// +// - The generation expected by the operator differs from the generation of the existing +// resource. +// - This is the likely result of an actor other than the operator updating a resource +// managed by the operator. +// +// - The spec of the required resource differs from the spec of the existing resource. +// - The difference will be detected via metadata comparison since the hash of the +// resource's spec will be set as an annotation prior to comparison. + +const specHashAnnotation = "operator.openshift.io/spec-hash" + +// SetSpecHashAnnotation computes the hash of the provided spec and sets an annotation of the +// hash on the provided ObjectMeta. This method is used internally by Apply methods, and +// is exposed to support testing with fake clients that need to know the mutated form of the +// resource resulting from an Apply call. +func SetSpecHashAnnotation(objMeta *metav1.ObjectMeta, spec interface{}) error { + jsonBytes, err := json.Marshal(spec) + if err != nil { + return err + } + specHash := fmt.Sprintf("%x", sha256.Sum256(jsonBytes)) + if objMeta.Annotations == nil { + objMeta.Annotations = map[string]string{} + } + objMeta.Annotations[specHashAnnotation] = specHash + return nil +} + +// ApplyDeployment ensures the form of the specified deployment is present in the API. If it +// does not exist, it will be created. If it does exist, the metadata of the required +// deployment will be merged with the existing deployment and an update performed if the +// deployment spec and metadata differ from the previously required spec and metadata. For +// further detail, check the top-level comment. +// +// NOTE: The previous implementation of this method was renamed to +// ApplyDeploymentWithForce. If you are reading this in response to a compile error due to the +// change in signature, you have the following options: +// +// - Update the calling code to rely on the spec comparison provided by the new +// implementation. If the code in question was specifying the force parameter to ensure +// rollout in response to changes in resources external to the deployment, it will need to be +// revised to set that external state as an annotation e.g. +// +// myoperator.openshift.io/my-resource: +// +// - Update the call to use ApplyDeploymentWithForce. This is available as a temporary measure +// but the method is deprecated and will be removed in 4.6.
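The external-state pattern described above can be made concrete with a short sketch; the annotation key, manifest constructor, and generation bookkeeping are illustrative assumptions:

required := buildDeploymentManifest() // hypothetical constructor for the required Deployment
if required.Annotations == nil {
	required.Annotations = map[string]string{}
}
// Rolls the deployment whenever the external config changes, per the comment above.
required.Annotations["myoperator.openshift.io/config-resource-version"] = config.ResourceVersion

actual, changed, err := resourceapply.ApplyDeployment(ctx, kubeClient.AppsV1(), recorder, required, expectedGeneration)
if err == nil && changed {
	expectedGeneration = actual.Generation // remember for the next sync
}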
+func ApplyDeployment(ctx context.Context, client appsclientv1.DeploymentsGetter, recorder events.Recorder, + requiredOriginal *appsv1.Deployment, expectedGeneration int64) (*appsv1.Deployment, bool, error) { + + required := requiredOriginal.DeepCopy() + err := SetSpecHashAnnotation(&required.ObjectMeta, required.Spec) + if err != nil { + return nil, false, err + } + + return ApplyDeploymentWithForce(ctx, client, recorder, required, expectedGeneration, false) +} + +// ApplyDeploymentWithForce merges objectmeta and requires matching generation. It returns the final Object, whether any change was made, and an error. +// +// DEPRECATED - This method will be removed in 4.6 and callers will need to migrate to ApplyDeployment before then. +func ApplyDeploymentWithForce(ctx context.Context, client appsclientv1.DeploymentsGetter, recorder events.Recorder, requiredOriginal *appsv1.Deployment, expectedGeneration int64, + forceRollout bool) (*appsv1.Deployment, bool, error) { + + required := requiredOriginal.DeepCopy() + if required.Annotations == nil { + required.Annotations = map[string]string{} + } + if _, ok := required.Annotations[specHashAnnotation]; !ok { + // If the spec hash annotation is not present, the caller expects the + // pull-spec annotation to be applied. + required.Annotations["operator.openshift.io/pull-spec"] = required.Spec.Template.Spec.Containers[0].Image + } + existing, err := client.Deployments(required.Namespace).Get(ctx, required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.Deployments(required.Namespace).Create(ctx, required, metav1.CreateOptions{}) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + // there was no change to metadata, the generation was right, and we weren't asked to force the deployment + if !*modified && existingCopy.ObjectMeta.Generation == expectedGeneration && !forceRollout { + return existingCopy, false, nil + } + + // at this point we know that we're going to perform a write. We're just trying to get the object correct + toWrite := existingCopy // shallow copy so the code reads easier + toWrite.Spec = *required.Spec.DeepCopy() + if forceRollout { + // forces a deployment + forceString := string(uuid.NewUUID()) + if toWrite.Annotations == nil { + toWrite.Annotations = map[string]string{} + } + if toWrite.Spec.Template.Annotations == nil { + toWrite.Spec.Template.Annotations = map[string]string{} + } + toWrite.Annotations["operator.openshift.io/force"] = forceString + toWrite.Spec.Template.Annotations["operator.openshift.io/force"] = forceString + } + + if klog.V(4).Enabled() { + klog.Infof("Deployment %q changes: %v", required.Namespace+"/"+required.Name, JSONPatchNoError(existing, toWrite)) + } + + actual, err := client.Deployments(required.Namespace).Update(ctx, toWrite, metav1.UpdateOptions{}) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} + +// ApplyDaemonSet ensures the form of the specified daemonset is present in the API. If it +// does not exist, it will be created. If it does exist, the metadata of the required +// daemonset will be merged with the existing daemonset and an update performed if the +// daemonset spec and metadata differ from the previously required spec and metadata.
For + // further detail, check the top-level comment. +// +// NOTE: The previous implementation of this method was renamed to ApplyDaemonSetWithForce. If +// you are reading this in response to a compile error due to the change in signature, you have +// the following options: +// +// - Update the calling code to rely on the spec comparison provided by the new +// implementation. If the code in question was specifying the force parameter to ensure +// rollout in response to changes in resources external to the daemonset, it will need to be +// revised to set that external state as an annotation e.g. +// +// myoperator.openshift.io/my-resource: +// +// - Update the call to use ApplyDaemonSetWithForce. This is available as a temporary measure +// but the method is deprecated and will be removed in 4.6. +func ApplyDaemonSet(ctx context.Context, client appsclientv1.DaemonSetsGetter, recorder events.Recorder, + requiredOriginal *appsv1.DaemonSet, expectedGeneration int64) (*appsv1.DaemonSet, bool, error) { + + required := requiredOriginal.DeepCopy() + err := SetSpecHashAnnotation(&required.ObjectMeta, required.Spec) + if err != nil { + return nil, false, err + } + + return ApplyDaemonSetWithForce(ctx, client, recorder, required, expectedGeneration, false) +} + +// ApplyDaemonSetWithForce merges objectmeta and requires matching generation. It returns the final Object, whether any change was made, and an error +// DEPRECATED - This method will be removed in 4.6 and callers will need to migrate to ApplyDaemonSet before then. +func ApplyDaemonSetWithForce(ctx context.Context, client appsclientv1.DaemonSetsGetter, recorder events.Recorder, requiredOriginal *appsv1.DaemonSet, expectedGeneration int64, forceRollout bool) (*appsv1.DaemonSet, bool, error) { + required := requiredOriginal.DeepCopy() + if required.Annotations == nil { + required.Annotations = map[string]string{} + } + if _, ok := required.Annotations[specHashAnnotation]; !ok { + // If the spec hash annotation is not present, the caller expects the + // pull-spec annotation to be applied. + required.Annotations["operator.openshift.io/pull-spec"] = required.Spec.Template.Spec.Containers[0].Image + } + existing, err := client.DaemonSets(required.Namespace).Get(ctx, required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := client.DaemonSets(required.Namespace).Create(ctx, required, metav1.CreateOptions{}) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + // there was no change to metadata, the generation was right, and we weren't asked to force the deployment + if !*modified && existingCopy.ObjectMeta.Generation == expectedGeneration && !forceRollout { + return existingCopy, false, nil + } + + // at this point we know that we're going to perform a write.
We're just trying to get the object correct + toWrite := existingCopy // shallow copy so the code reads easier + toWrite.Spec = *required.Spec.DeepCopy() + if forceRollout { + // forces a deployment + forceString := string(uuid.NewUUID()) + if toWrite.Annotations == nil { + toWrite.Annotations = map[string]string{} + } + if toWrite.Spec.Template.Annotations == nil { + toWrite.Spec.Template.Annotations = map[string]string{} + } + toWrite.Annotations["operator.openshift.io/force"] = forceString + toWrite.Spec.Template.Annotations["operator.openshift.io/force"] = forceString + } + + if klog.V(4).Enabled() { + klog.Infof("DaemonSet %q changes: %v", required.Namespace+"/"+required.Name, JSONPatchNoError(existing, toWrite)) + } + actual, err := client.DaemonSets(required.Namespace).Update(ctx, toWrite, metav1.UpdateOptions{}) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go new file mode 100644 index 000000000..c519d4dc5 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/core.go @@ -0,0 +1,657 @@ +package resourceapply + +import ( + "bytes" + "context" + "fmt" + "sort" + "strings" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + coreclientv1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/klog/v2" +) + +// TODO find way to create a registry of these based on struct mapping or some such that forces users to get this right +// +// for creating an ApplyGeneric +// Perhaps a struct containing the apply function and the getKind +func getCoreGroupKind(obj runtime.Object) *schema.GroupKind { + switch obj.(type) { + case *corev1.Namespace: + return &schema.GroupKind{ + Kind: "Namespace", + } + case *corev1.Service: + return &schema.GroupKind{ + Kind: "Service", + } + case *corev1.Pod: + return &schema.GroupKind{ + Kind: "Pod", + } + case *corev1.ServiceAccount: + return &schema.GroupKind{ + Kind: "ServiceAccount", + } + case *corev1.ConfigMap: + return &schema.GroupKind{ + Kind: "ConfigMap", + } + case *corev1.Secret: + return &schema.GroupKind{ + Kind: "Secret", + } + default: + return nil + } +} + +// ApplyNamespace merges objectmeta, does not worry about anything else +func ApplyNamespace(ctx context.Context, client coreclientv1.NamespacesGetter, recorder events.Recorder, required *corev1.Namespace) (*corev1.Namespace, bool, error) { + return ApplyNamespaceImproved(ctx, client, recorder, required, noCache) +} + +// ApplyService merges objectmeta and requires +// TODO, since this cannot determine whether changes are due to legitimate actors (api server) or illegitimate ones (users), we cannot update +// TODO I've special cased the selector for now +func ApplyService(ctx context.Context, client coreclientv1.ServicesGetter, recorder events.Recorder, required *corev1.Service) (*corev1.Service, bool, error) { + return ApplyServiceImproved(ctx, client, recorder, required, noCache) +} + +// ApplyPod merges objectmeta, does not worry about anything else +func ApplyPod(ctx context.Context, client 
coreclientv1.PodsGetter, recorder events.Recorder, required *corev1.Pod) (*corev1.Pod, bool, error) { + return ApplyPodImproved(ctx, client, recorder, required, noCache) +} + +// ApplyServiceAccount merges objectmeta, does not worry about anything else +func ApplyServiceAccount(ctx context.Context, client coreclientv1.ServiceAccountsGetter, recorder events.Recorder, required *corev1.ServiceAccount) (*corev1.ServiceAccount, bool, error) { + return ApplyServiceAccountImproved(ctx, client, recorder, required, noCache) +} + +// ApplyConfigMap merges objectmeta, requires data +func ApplyConfigMap(ctx context.Context, client coreclientv1.ConfigMapsGetter, recorder events.Recorder, required *corev1.ConfigMap) (*corev1.ConfigMap, bool, error) { + return ApplyConfigMapImproved(ctx, client, recorder, required, noCache) +} + +// ApplySecret merges objectmeta, requires data +func ApplySecret(ctx context.Context, client coreclientv1.SecretsGetter, recorder events.Recorder, required *corev1.Secret) (*corev1.Secret, bool, error) { + return ApplySecretImproved(ctx, client, recorder, required, noCache) +} + +// ApplyNamespace merges objectmeta, does not worry about anything else +func ApplyNamespaceImproved(ctx context.Context, client coreclientv1.NamespacesGetter, recorder events.Recorder, required *corev1.Namespace, cache ResourceCache) (*corev1.Namespace, bool, error) { + existing, err := client.Namespaces().Get(ctx, required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + requiredCopy := required.DeepCopy() + actual, err := client.Namespaces(). + Create(ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*corev1.Namespace), metav1.CreateOptions{}) + reportCreateEvent(recorder, requiredCopy, err) + cache.UpdateCachedResourceMetadata(required, actual) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + if cache.SafeToSkipApply(required, existing) { + return existing, false, nil + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + if !*modified { + cache.UpdateCachedResourceMetadata(required, existingCopy) + return existingCopy, false, nil + } + + if klog.V(4).Enabled() { + klog.Infof("Namespace %q changes: %v", required.Name, JSONPatchNoError(existing, existingCopy)) + } + + actual, err := client.Namespaces().Update(ctx, existingCopy, metav1.UpdateOptions{}) + reportUpdateEvent(recorder, required, err) + cache.UpdateCachedResourceMetadata(required, actual) + return actual, true, err +} + +// ApplyService merges objectmeta and requires. +// It detects changes in `required`, i.e. an operator needs .spec changes and overwrites existing .spec with those. +// TODO, since this cannot determine whether changes in `existing` are due to legitimate actors (api server) or illegitimate ones (users), we cannot update. 
+// TODO I've special cased the selector for now +func ApplyServiceImproved(ctx context.Context, client coreclientv1.ServicesGetter, recorder events.Recorder, requiredOriginal *corev1.Service, cache ResourceCache) (*corev1.Service, bool, error) { + required := requiredOriginal.DeepCopy() + err := SetSpecHashAnnotation(&required.ObjectMeta, required.Spec) + if err != nil { + return nil, false, err + } + + existing, err := client.Services(required.Namespace).Get(ctx, required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + requiredCopy := required.DeepCopy() + actual, err := client.Services(requiredCopy.Namespace). + Create(ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*corev1.Service), metav1.CreateOptions{}) + reportCreateEvent(recorder, requiredCopy, err) + cache.UpdateCachedResourceMetadata(required, actual) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + if cache.SafeToSkipApply(required, existing) { + return existing, false, nil + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + // This will catch also changes between old `required.spec` and current `required.spec`, because + // the annotation from SetSpecHashAnnotation will be different. + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + selectorSame := equality.Semantic.DeepEqual(existingCopy.Spec.Selector, required.Spec.Selector) + + typeSame := false + requiredIsEmpty := len(required.Spec.Type) == 0 + existingCopyIsCluster := existingCopy.Spec.Type == corev1.ServiceTypeClusterIP + if (requiredIsEmpty && existingCopyIsCluster) || equality.Semantic.DeepEqual(existingCopy.Spec.Type, required.Spec.Type) { + typeSame = true + } + + if selectorSame && typeSame && !*modified { + cache.UpdateCachedResourceMetadata(required, existingCopy) + return existingCopy, false, nil + } + + // Either (user changed selector or type) or metadata changed (incl. spec hash). Stomp over + // any user *and* Kubernetes changes, hoping that Kubernetes will restore its values. + existingCopy.Spec = required.Spec + if klog.V(4).Enabled() { + klog.Infof("Service %q changes: %v", required.Namespace+"/"+required.Name, JSONPatchNoError(existing, required)) + } + + actual, err := client.Services(required.Namespace).Update(ctx, existingCopy, metav1.UpdateOptions{}) + reportUpdateEvent(recorder, required, err) + cache.UpdateCachedResourceMetadata(required, actual) + return actual, true, err +} + +// ApplyPod merges objectmeta, does not worry about anything else +func ApplyPodImproved(ctx context.Context, client coreclientv1.PodsGetter, recorder events.Recorder, required *corev1.Pod, cache ResourceCache) (*corev1.Pod, bool, error) { + existing, err := client.Pods(required.Namespace).Get(ctx, required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + requiredCopy := required.DeepCopy() + actual, err := client.Pods(requiredCopy.Namespace). 
+ Create(ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*corev1.Pod), metav1.CreateOptions{}) + reportCreateEvent(recorder, requiredCopy, err) + cache.UpdateCachedResourceMetadata(required, actual) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + if cache.SafeToSkipApply(required, existing) { + return existing, false, nil + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + if !*modified { + cache.UpdateCachedResourceMetadata(required, existingCopy) + return existingCopy, false, nil + } + + if klog.V(4).Enabled() { + klog.Infof("Pod %q changes: %v", required.Namespace+"/"+required.Name, JSONPatchNoError(existing, required)) + } + + actual, err := client.Pods(required.Namespace).Update(ctx, existingCopy, metav1.UpdateOptions{}) + reportUpdateEvent(recorder, required, err) + cache.UpdateCachedResourceMetadata(required, actual) + return actual, true, err +} + +// ApplyServiceAccount merges objectmeta, does not worry about anything else +func ApplyServiceAccountImproved(ctx context.Context, client coreclientv1.ServiceAccountsGetter, recorder events.Recorder, required *corev1.ServiceAccount, cache ResourceCache) (*corev1.ServiceAccount, bool, error) { + existing, err := client.ServiceAccounts(required.Namespace).Get(ctx, required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + requiredCopy := required.DeepCopy() + actual, err := client.ServiceAccounts(requiredCopy.Namespace). + Create(ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*corev1.ServiceAccount), metav1.CreateOptions{}) + reportCreateEvent(recorder, requiredCopy, err) + cache.UpdateCachedResourceMetadata(required, actual) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + if cache.SafeToSkipApply(required, existing) { + return existing, false, nil + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + if !*modified { + cache.UpdateCachedResourceMetadata(required, existingCopy) + return existingCopy, false, nil + } + if klog.V(4).Enabled() { + klog.Infof("ServiceAccount %q changes: %v", required.Namespace+"/"+required.Name, JSONPatchNoError(existing, required)) + } + actual, err := client.ServiceAccounts(required.Namespace).Update(ctx, existingCopy, metav1.UpdateOptions{}) + reportUpdateEvent(recorder, required, err) + cache.UpdateCachedResourceMetadata(required, actual) + return actual, true, err +} + +// ApplyConfigMap merges objectmeta, requires data +func ApplyConfigMapImproved(ctx context.Context, client coreclientv1.ConfigMapsGetter, recorder events.Recorder, required *corev1.ConfigMap, cache ResourceCache) (*corev1.ConfigMap, bool, error) { + existing, err := client.ConfigMaps(required.Namespace).Get(ctx, required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + requiredCopy := required.DeepCopy() + actual, err := client.ConfigMaps(requiredCopy.Namespace). 
+			Create(ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*corev1.ConfigMap), metav1.CreateOptions{})
+		reportCreateEvent(recorder, requiredCopy, err)
+		cache.UpdateCachedResourceMetadata(required, actual)
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	if cache.SafeToSkipApply(required, existing) {
+		return existing, false, nil
+	}
+
+	modified := resourcemerge.BoolPtr(false)
+	existingCopy := existing.DeepCopy()
+
+	resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta)
+
+	caBundleInjected := required.Labels["config.openshift.io/inject-trusted-cabundle"] == "true"
+	_, newCABundleRequired := required.Data["ca-bundle.crt"]
+
+	var modifiedKeys []string
+	for existingCopyKey, existingCopyValue := range existingCopy.Data {
+		// if we're injecting a ca-bundle and the required isn't forcing the value, then don't use the value of existing
+		// to drive a diff detection. If required has set the value then we need to force the value in order to have apply
+		// behave predictably.
+		if caBundleInjected && !newCABundleRequired && existingCopyKey == "ca-bundle.crt" {
+			continue
+		}
+		if requiredValue, ok := required.Data[existingCopyKey]; !ok || (existingCopyValue != requiredValue) {
+			modifiedKeys = append(modifiedKeys, "data."+existingCopyKey)
+		}
+	}
+	for existingCopyKey, existingCopyBinValue := range existingCopy.BinaryData {
+		if requiredBinValue, ok := required.BinaryData[existingCopyKey]; !ok || !bytes.Equal(existingCopyBinValue, requiredBinValue) {
+			modifiedKeys = append(modifiedKeys, "binaryData."+existingCopyKey)
+		}
+	}
+	for requiredKey := range required.Data {
+		if _, ok := existingCopy.Data[requiredKey]; !ok {
+			modifiedKeys = append(modifiedKeys, "data."+requiredKey)
+		}
+	}
+	for requiredBinKey := range required.BinaryData {
+		if _, ok := existingCopy.BinaryData[requiredBinKey]; !ok {
+			modifiedKeys = append(modifiedKeys, "binaryData."+requiredBinKey)
+		}
+	}
+
+	dataSame := len(modifiedKeys) == 0
+	if dataSame && !*modified {
+		cache.UpdateCachedResourceMetadata(required, existingCopy)
+		return existingCopy, false, nil
+	}
+	existingCopy.Data = required.Data
+	existingCopy.BinaryData = required.BinaryData
+	// if we're injecting a ca-bundle, and we had a previous value, and the required object isn't setting the value, then set it back to the previous value
+	if existingCABundle, existedBefore := existing.Data["ca-bundle.crt"]; caBundleInjected && existedBefore && !newCABundleRequired {
+		if existingCopy.Data == nil {
+			existingCopy.Data = map[string]string{}
+		}
+		existingCopy.Data["ca-bundle.crt"] = existingCABundle
+	}
+
+	actual, err := client.ConfigMaps(required.Namespace).Update(ctx, existingCopy, metav1.UpdateOptions{})
+
+	var details string
+	if !dataSame {
+		sort.Sort(sort.StringSlice(modifiedKeys))
+		details = fmt.Sprintf("caused by changes in %v", strings.Join(modifiedKeys, ","))
+	}
+	if klog.V(4).Enabled() {
+		klog.Infof("ConfigMap %q changes: %v", required.Namespace+"/"+required.Name, JSONPatchNoError(existing, required))
+	}
+	reportUpdateEvent(recorder, required, err, details)
+	cache.UpdateCachedResourceMetadata(required, actual)
+	return actual, true, err
+}
+
+// ApplySecret merges objectmeta, requires data
+func ApplySecretImproved(ctx context.Context, client coreclientv1.SecretsGetter, recorder events.Recorder, requiredInput *corev1.Secret, cache ResourceCache) (*corev1.Secret, bool, error) {
+	// copy the stringData to data. Error on a data content conflict inside required. This is usually a bug.
+
+	existing, err := client.Secrets(requiredInput.Namespace).Get(ctx, requiredInput.Name, metav1.GetOptions{})
+	if err != nil && !apierrors.IsNotFound(err) {
+		return nil, false, err
+	}
+
+	if cache.SafeToSkipApply(requiredInput, existing) {
+		return existing, false, nil
+	}
+
+	required := requiredInput.DeepCopy()
+	if required.Data == nil {
+		required.Data = map[string][]byte{}
+	}
+	for k, v := range required.StringData {
+		if dataV, ok := required.Data[k]; ok {
+			if string(dataV) != v {
+				return nil, false, fmt.Errorf("Secret.stringData[%q] conflicts with Secret.data[%q]", k, k)
+			}
+		}
+		required.Data[k] = []byte(v)
+	}
+	required.StringData = nil
+
+	if apierrors.IsNotFound(err) {
+		requiredCopy := required.DeepCopy()
+		actual, err := client.Secrets(requiredCopy.Namespace).
+			Create(ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*corev1.Secret), metav1.CreateOptions{})
+		reportCreateEvent(recorder, requiredCopy, err)
+		cache.UpdateCachedResourceMetadata(requiredInput, actual)
+		return actual, true, err
+	}
+	if err != nil {
+		return nil, false, err
+	}
+
+	existingCopy := existing.DeepCopy()
+
+	resourcemerge.EnsureObjectMeta(resourcemerge.BoolPtr(false), &existingCopy.ObjectMeta, required.ObjectMeta)
+
+	switch required.Type {
+	case corev1.SecretTypeServiceAccountToken:
+		// Secrets for ServiceAccountTokens will have data injected by the kube controller manager.
+		// We will apply only the explicitly set keys.
+		if existingCopy.Data == nil {
+			existingCopy.Data = map[string][]byte{}
+		}
+
+		for k, v := range required.Data {
+			existingCopy.Data[k] = v
+		}
+
+	default:
+		existingCopy.Data = required.Data
+	}
+
+	existingCopy.Type = required.Type
+
+	// The server defaults some values and we need to do the same, or the objects will never compare as equal.
+	if existingCopy.Type == "" {
+		existingCopy.Type = corev1.SecretTypeOpaque
+	}
+
+	if equality.Semantic.DeepEqual(existingCopy, existing) {
+		cache.UpdateCachedResourceMetadata(requiredInput, existingCopy)
+		return existing, false, nil
+	}
+
+	if klog.V(4).Enabled() {
+		klog.Infof("Secret %s/%s changes: %v", required.Namespace, required.Name, JSONPatchSecretNoError(existing, existingCopy))
+	}
+
+	var actual *corev1.Secret
+	/*
+	 * Kubernetes validation silently hides failures to update secret type.
+	 * https://github.com/kubernetes/kubernetes/blob/98e65951dccfd40d3b4f31949c2ab8df5912d93e/pkg/apis/core/validation/validation.go#L5048
+	 * We need to explicitly opt for delete+create in that case.
+	 */
+	if existingCopy.Type == existing.Type {
+		actual, err = client.Secrets(required.Namespace).Update(ctx, existingCopy, metav1.UpdateOptions{})
+		reportUpdateEvent(recorder, existingCopy, err)
+
+		if err == nil {
+			return actual, true, err
+		}
+		if !strings.Contains(err.Error(), "field is immutable") {
+			return actual, true, err
+		}
+	}
+
+	// if the field was immutable on a secret, we're going to be stuck until we delete it. Try to delete and then create.
+	deleteErr := client.Secrets(required.Namespace).Delete(ctx, existingCopy.Name, metav1.DeleteOptions{})
+	reportDeleteEvent(recorder, existingCopy, deleteErr)
+
+	// clear the RV and track the resulting object and error for the return, like our create path above.
+	existingCopy.ResourceVersion = ""
+	actual, err = client.Secrets(required.Namespace).Create(ctx, existingCopy, metav1.CreateOptions{})
+	reportCreateEvent(recorder, existingCopy, err)
+	cache.UpdateCachedResourceMetadata(requiredInput, actual)
+	return actual, true, err
+}
+
+// SyncConfigMap applies a ConfigMap from a location `sourceNamespace/sourceName` to `targetNamespace/targetName`
+func SyncConfigMap(ctx context.Context, client coreclientv1.ConfigMapsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, ownerRefs []metav1.OwnerReference) (*corev1.ConfigMap, bool, error) {
+	return SyncPartialConfigMap(ctx, client, recorder, sourceNamespace, sourceName, targetNamespace, targetName, nil, ownerRefs)
+}
+
+// SyncPartialConfigMap does what SyncConfigMap does but only synchronizes a subset of keys given by `syncedKeys`.
+// SyncPartialConfigMap will delete the target if `syncedKeys` are set but the source does not contain any of these keys.
+func SyncPartialConfigMap(ctx context.Context, client coreclientv1.ConfigMapsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, syncedKeys sets.String, ownerRefs []metav1.OwnerReference) (*corev1.ConfigMap, bool, error) {
+	source, err := client.ConfigMaps(sourceNamespace).Get(ctx, sourceName, metav1.GetOptions{})
+	switch {
+	case apierrors.IsNotFound(err):
+		modified, err := deleteConfigMapSyncTarget(ctx, client, recorder, targetNamespace, targetName)
+		return nil, modified, err
+	case err != nil:
+		return nil, false, err
+	default:
+		if len(syncedKeys) > 0 {
+			for sourceKey := range source.Data {
+				if !syncedKeys.Has(sourceKey) {
+					delete(source.Data, sourceKey)
+				}
+			}
+			for sourceKey := range source.BinaryData {
+				if !syncedKeys.Has(sourceKey) {
+					delete(source.BinaryData, sourceKey)
+				}
+			}
+
+			// remove the synced CM if the requested fields are not present in source
+			if len(source.Data)+len(source.BinaryData) == 0 {
+				modified, err := deleteConfigMapSyncTarget(ctx, client, recorder, targetNamespace, targetName)
+				return nil, modified, err
+			}
+		}
+
+		source.Namespace = targetNamespace
+		source.Name = targetName
+		source.ResourceVersion = ""
+		source.OwnerReferences = ownerRefs
+		return ApplyConfigMap(ctx, client, recorder, source)
+	}
+}
+
+func deleteConfigMapSyncTarget(ctx context.Context, client coreclientv1.ConfigMapsGetter, recorder events.Recorder, targetNamespace, targetName string) (bool, error) {
+	// The goal of this additional GET is to avoid reaching the API with a DELETE request
+	// in case the target doesn't exist. This is useful when using a cached client.
+ _, err := client.ConfigMaps(targetNamespace).Get(ctx, targetName, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + return false, nil + } + err = client.ConfigMaps(targetNamespace).Delete(ctx, targetName, metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + return false, nil + } + if err == nil { + recorder.Eventf("TargetConfigDeleted", "Deleted target configmap %s/%s because source config does not exist", targetNamespace, targetName) + return true, nil + } + return false, err +} + +// SyncSecret applies a Secret from a location `sourceNamespace/sourceName` to `targetNamespace/targetName` +func SyncSecret(ctx context.Context, client coreclientv1.SecretsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, ownerRefs []metav1.OwnerReference) (*corev1.Secret, bool, error) { + return SyncPartialSecret(ctx, client, recorder, sourceNamespace, sourceName, targetNamespace, targetName, nil, ownerRefs) +} + +// SyncPartialSecret does what SyncSecret does but it only synchronizes a subset of keys given by `syncedKeys`. +// SyncPartialSecret will delete the target if `syncedKeys` are set but the source does not contain any of these keys. +func SyncPartialSecret(ctx context.Context, client coreclientv1.SecretsGetter, recorder events.Recorder, sourceNamespace, sourceName, targetNamespace, targetName string, syncedKeys sets.String, ownerRefs []metav1.OwnerReference) (*corev1.Secret, bool, error) { + source, err := client.Secrets(sourceNamespace).Get(ctx, sourceName, metav1.GetOptions{}) + switch { + case apierrors.IsNotFound(err): + modified, err := deleteSecretSyncTarget(ctx, client, recorder, targetNamespace, targetName) + return nil, modified, err + case err != nil: + return nil, false, err + default: + if source.Type == corev1.SecretTypeServiceAccountToken { + + // Make sure the token is already present, otherwise we have to wait before creating the target + if len(source.Data[corev1.ServiceAccountTokenKey]) == 0 { + return nil, false, fmt.Errorf("secret %s/%s doesn't have a token yet", source.Namespace, source.Name) + } + + if source.Annotations != nil { + // When syncing a service account token we have to remove the SA annotation to disable injection into copies + delete(source.Annotations, corev1.ServiceAccountNameKey) + // To make it clean, remove the dormant annotations as well + delete(source.Annotations, corev1.ServiceAccountUIDKey) + } + + // SecretTypeServiceAccountToken implies required fields and injection which we do not want in copies + source.Type = corev1.SecretTypeOpaque + } + + if len(syncedKeys) > 0 { + for sourceKey := range source.Data { + if !syncedKeys.Has(sourceKey) { + delete(source.Data, sourceKey) + } + } + for sourceKey := range source.StringData { + if !syncedKeys.Has(sourceKey) { + delete(source.StringData, sourceKey) + } + } + + // remove the synced secret if the requested fields are not present in source + if len(source.Data)+len(source.StringData) == 0 { + modified, err := deleteSecretSyncTarget(ctx, client, recorder, targetNamespace, targetName) + return nil, modified, err + } + } + + source.Namespace = targetNamespace + source.Name = targetName + source.ResourceVersion = "" + source.OwnerReferences = ownerRefs + return ApplySecret(ctx, client, recorder, source) + } +} + +func deleteSecretSyncTarget(ctx context.Context, client coreclientv1.SecretsGetter, recorder events.Recorder, targetNamespace, targetName string) (bool, error) { + err := client.Secrets(targetNamespace).Delete(ctx, targetName, 
metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + return false, nil + } + if err == nil { + recorder.Eventf("TargetSecretDeleted", "Deleted target secret %s/%s because source config does not exist", targetNamespace, targetName) + return true, nil + } + return false, err +} + +func DeleteNamespace(ctx context.Context, client coreclientv1.NamespacesGetter, recorder events.Recorder, required *corev1.Namespace) (*corev1.Namespace, bool, error) { + err := client.Namespaces().Delete(ctx, required.Name, metav1.DeleteOptions{}) + if err != nil && apierrors.IsNotFound(err) { + return nil, false, nil + } + if err != nil { + return nil, false, err + } + reportDeleteEvent(recorder, required, err) + return nil, true, nil +} + +func DeleteService(ctx context.Context, client coreclientv1.ServicesGetter, recorder events.Recorder, required *corev1.Service) (*corev1.Service, bool, error) { + err := client.Services(required.Namespace).Delete(ctx, required.Name, metav1.DeleteOptions{}) + if err != nil && apierrors.IsNotFound(err) { + return nil, false, nil + } + if err != nil { + return nil, false, err + } + reportDeleteEvent(recorder, required, err) + return nil, true, nil +} + +func DeletePod(ctx context.Context, client coreclientv1.PodsGetter, recorder events.Recorder, required *corev1.Pod) (*corev1.Pod, bool, error) { + err := client.Pods(required.Namespace).Delete(ctx, required.Name, metav1.DeleteOptions{}) + if err != nil && apierrors.IsNotFound(err) { + return nil, false, nil + } + if err != nil { + return nil, false, err + } + reportDeleteEvent(recorder, required, err) + return nil, true, nil +} + +func DeleteServiceAccount(ctx context.Context, client coreclientv1.ServiceAccountsGetter, recorder events.Recorder, required *corev1.ServiceAccount) (*corev1.ServiceAccount, bool, error) { + err := client.ServiceAccounts(required.Namespace).Delete(ctx, required.Name, metav1.DeleteOptions{}) + if err != nil && apierrors.IsNotFound(err) { + return nil, false, nil + } + if err != nil { + return nil, false, err + } + reportDeleteEvent(recorder, required, err) + return nil, true, nil +} + +func DeleteConfigMap(ctx context.Context, client coreclientv1.ConfigMapsGetter, recorder events.Recorder, required *corev1.ConfigMap) (*corev1.ConfigMap, bool, error) { + err := client.ConfigMaps(required.Namespace).Delete(ctx, required.Name, metav1.DeleteOptions{}) + if err != nil && apierrors.IsNotFound(err) { + return nil, false, nil + } + if err != nil { + return nil, false, err + } + reportDeleteEvent(recorder, required, err) + return nil, true, nil +} + +func DeleteSecret(ctx context.Context, client coreclientv1.SecretsGetter, recorder events.Recorder, required *corev1.Secret) (*corev1.Secret, bool, error) { + err := client.Secrets(required.Namespace).Delete(ctx, required.Name, metav1.DeleteOptions{}) + if err != nil && apierrors.IsNotFound(err) { + return nil, false, nil + } + if err != nil { + return nil, false, err + } + reportDeleteEvent(recorder, required, err) + return nil, true, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/credentialsrequest.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/credentialsrequest.go new file mode 100644 index 000000000..2de8136a8 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/credentialsrequest.go @@ -0,0 +1,106 @@ +package resourceapply + +import ( + "context" + "crypto/sha256" + "fmt" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/client-go/dynamic" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourcehelper" +) + +const ( + CredentialsRequestGroup = "cloudcredential.openshift.io" + CredentialsRequestVersion = "v1" + CredentialsRequestResource = "credentialsrequests" +) + +var credentialsRequestResourceGVR schema.GroupVersionResource = schema.GroupVersionResource{ + Group: CredentialsRequestGroup, + Version: CredentialsRequestVersion, + Resource: CredentialsRequestResource, +} + +func AddCredentialsRequestHash(cr *unstructured.Unstructured) error { + jsonBytes, err := json.Marshal(cr.Object["spec"]) + if err != nil { + return err + } + specHash := fmt.Sprintf("%x", sha256.Sum256(jsonBytes)) + annotations := cr.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + annotations[specHashAnnotation] = specHash + cr.SetAnnotations(annotations) + return nil +} + +func ApplyCredentialsRequest( + ctx context.Context, + client dynamic.Interface, + recorder events.Recorder, + required *unstructured.Unstructured, + expectedGeneration int64, +) (*unstructured.Unstructured, bool, error) { + if required.GetName() == "" { + return nil, false, fmt.Errorf("invalid object: name cannot be empty") + } + + if err := AddCredentialsRequestHash(required); err != nil { + return nil, false, err + } + + crClient := client.Resource(credentialsRequestResourceGVR).Namespace(required.GetNamespace()) + existing, err := crClient.Get(ctx, required.GetName(), metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + actual, err := crClient.Create(ctx, required, metav1.CreateOptions{}) + if err == nil { + recorder.Eventf( + fmt.Sprintf("%sCreated", required.GetKind()), + "Created %s because it was missing", + resourcehelper.FormatResourceForCLIWithNamespace(required)) + return actual, true, err + } + recorder.Warningf( + fmt.Sprintf("%sCreateFailed", required.GetKind()), + "Failed to create %s: %v", + resourcehelper.FormatResourceForCLIWithNamespace(required), + err) + return nil, false, err + } + if err != nil { + return nil, false, err + } + + // Check CredentialRequest.Generation. 
+ needApply := false + if existing.GetGeneration() != expectedGeneration { + needApply = true + } + + // Check specHashAnnotation + existingAnnotations := existing.GetAnnotations() + if existingAnnotations == nil || existingAnnotations[specHashAnnotation] != required.GetAnnotations()[specHashAnnotation] { + needApply = true + } + + if !needApply { + return existing, false, nil + } + + requiredCopy := required.DeepCopy() + existing.Object["spec"] = requiredCopy.Object["spec"] + actual, err := crClient.Update(ctx, existing, metav1.UpdateOptions{}) + if err != nil { + return nil, false, err + } + return actual, existing.GetResourceVersion() != actual.GetResourceVersion(), nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/event_helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/event_helpers.go new file mode 100644 index 000000000..af598993f --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/event_helpers.go @@ -0,0 +1,56 @@ +package resourceapply + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/runtime" + + openshiftapi "github.com/openshift/api" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourcehelper" +) + +var ( + openshiftScheme = runtime.NewScheme() +) + +func init() { + if err := openshiftapi.Install(openshiftScheme); err != nil { + panic(err) + } +} + +func reportCreateEvent(recorder events.Recorder, obj runtime.Object, originalErr error) { + gvk := resourcehelper.GuessObjectGroupVersionKind(obj) + if originalErr == nil { + recorder.Eventf(fmt.Sprintf("%sCreated", gvk.Kind), "Created %s because it was missing", resourcehelper.FormatResourceForCLIWithNamespace(obj)) + return + } + recorder.Warningf(fmt.Sprintf("%sCreateFailed", gvk.Kind), "Failed to create %s: %v", resourcehelper.FormatResourceForCLIWithNamespace(obj), originalErr) +} + +func reportUpdateEvent(recorder events.Recorder, obj runtime.Object, originalErr error, details ...string) { + gvk := resourcehelper.GuessObjectGroupVersionKind(obj) + switch { + case originalErr != nil: + recorder.Warningf(fmt.Sprintf("%sUpdateFailed", gvk.Kind), "Failed to update %s: %v", resourcehelper.FormatResourceForCLIWithNamespace(obj), originalErr) + case len(details) == 0: + recorder.Eventf(fmt.Sprintf("%sUpdated", gvk.Kind), "Updated %s because it changed", resourcehelper.FormatResourceForCLIWithNamespace(obj)) + default: + recorder.Eventf(fmt.Sprintf("%sUpdated", gvk.Kind), "Updated %s:\n%s", resourcehelper.FormatResourceForCLIWithNamespace(obj), strings.Join(details, "\n")) + } +} + +func reportDeleteEvent(recorder events.Recorder, obj runtime.Object, originalErr error, details ...string) { + gvk := resourcehelper.GuessObjectGroupVersionKind(obj) + switch { + case originalErr != nil: + recorder.Warningf(fmt.Sprintf("%sDeleteFailed", gvk.Kind), "Failed to delete %s: %v", resourcehelper.FormatResourceForCLIWithNamespace(obj), originalErr) + case len(details) == 0: + recorder.Eventf(fmt.Sprintf("%sDeleted", gvk.Kind), "Deleted %s", resourcehelper.FormatResourceForCLIWithNamespace(obj)) + default: + recorder.Eventf(fmt.Sprintf("%sDeleted", gvk.Kind), "Deleted %s:\n%s", resourcehelper.FormatResourceForCLIWithNamespace(obj), strings.Join(details, "\n")) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go 
b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go
new file mode 100644
index 000000000..c32c330bc
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/generic.go
@@ -0,0 +1,371 @@
+package resourceapply
+
+import (
+	"context"
+	"fmt"
+
+	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+	corev1 "k8s.io/api/core/v1"
+	policyv1 "k8s.io/api/policy/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
+	storagev1 "k8s.io/api/storage/v1"
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/kubernetes"
+	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
+	migrationv1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1"
+	migrationclient "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset"
+
+	"github.com/openshift/library-go/pkg/operator/events"
+	"github.com/openshift/library-go/pkg/operator/resource/resourceread"
+	"github.com/openshift/library-go/pkg/operator/v1helpers"
+)
+
+type AssetFunc func(name string) ([]byte, error)
+
+type ApplyResult struct {
+	File    string
+	Type    string
+	Result  runtime.Object
+	Changed bool
+	Error   error
+}
+
+// ConditionalFunction expresses a dependency: it gates applying a resource on another condition instead of
+// blindly creating the resource. It can also be used to decide when the resource should be deleted.
+type ConditionalFunction func() bool
+
+type ClientHolder struct {
+	kubeClient          kubernetes.Interface
+	apiExtensionsClient apiextensionsclient.Interface
+	kubeInformers       v1helpers.KubeInformersForNamespaces
+	dynamicClient       dynamic.Interface
+	migrationClient     migrationclient.Interface
+}
+
+func NewClientHolder() *ClientHolder {
+	return &ClientHolder{}
+}
+
+func NewKubeClientHolder(client kubernetes.Interface) *ClientHolder {
+	return NewClientHolder().WithKubernetes(client)
+}
+
+func (c *ClientHolder) WithKubernetes(client kubernetes.Interface) *ClientHolder {
+	c.kubeClient = client
+	return c
+}
+
+func (c *ClientHolder) WithKubernetesInformers(kubeInformers v1helpers.KubeInformersForNamespaces) *ClientHolder {
+	c.kubeInformers = kubeInformers
+	return c
+}
+
+func (c *ClientHolder) WithAPIExtensionsClient(client apiextensionsclient.Interface) *ClientHolder {
+	c.apiExtensionsClient = client
+	return c
+}
+
+func (c *ClientHolder) WithDynamicClient(client dynamic.Interface) *ClientHolder {
+	c.dynamicClient = client
+	return c
+}
+
+func (c *ClientHolder) WithMigrationClient(client migrationclient.Interface) *ClientHolder {
+	c.migrationClient = client
+	return c
+}
+
+// ApplyDirectly applies the given manifest files to the API server.
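+//
+// A minimal usage sketch (asset helper and manifest names assumed for illustration; not part of
+// this vendored file):
+//
+//	clients := NewKubeClientHolder(kubeClient).WithDynamicClient(dynamicClient)
+//	results := ApplyDirectly(ctx, clients, recorder, NewResourceCache(), assets.Asset,
+//		"manifests/serviceaccount.yaml", "manifests/role.yaml")
+//	for _, res := range results {
+//		if res.Error != nil {
+//			// res.File names the manifest that failed to apply
+//		}
+//	}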
+func ApplyDirectly(ctx context.Context, clients *ClientHolder, recorder events.Recorder, cache ResourceCache, manifests AssetFunc, files ...string) []ApplyResult { + ret := []ApplyResult{} + + for _, file := range files { + result := ApplyResult{File: file} + objBytes, err := manifests(file) + if err != nil { + result.Error = fmt.Errorf("missing %q: %v", file, err) + ret = append(ret, result) + continue + } + requiredObj, err := resourceread.ReadGenericWithUnstructured(objBytes) + if err != nil { + result.Error = fmt.Errorf("cannot decode %q: %v", file, err) + ret = append(ret, result) + continue + } + result.Type = fmt.Sprintf("%T", requiredObj) + + // NOTE: Do not add CR resources into this switch otherwise the protobuf client can cause problems. + switch t := requiredObj.(type) { + case *corev1.Namespace: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + result.Result, result.Changed, result.Error = ApplyNamespaceImproved(ctx, clients.kubeClient.CoreV1(), recorder, t, cache) + } + case *corev1.Service: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + result.Result, result.Changed, result.Error = ApplyServiceImproved(ctx, clients.kubeClient.CoreV1(), recorder, t, cache) + } + case *corev1.Pod: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + result.Result, result.Changed, result.Error = ApplyPodImproved(ctx, clients.kubeClient.CoreV1(), recorder, t, cache) + } + case *corev1.ServiceAccount: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + result.Result, result.Changed, result.Error = ApplyServiceAccountImproved(ctx, clients.kubeClient.CoreV1(), recorder, t, cache) + } + case *corev1.ConfigMap: + client := clients.configMapsGetter() + if client == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + result.Result, result.Changed, result.Error = ApplyConfigMapImproved(ctx, client, recorder, t, cache) + } + case *corev1.Secret: + client := clients.secretsGetter() + if client == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + result.Result, result.Changed, result.Error = ApplySecretImproved(ctx, client, recorder, t, cache) + } + case *rbacv1.ClusterRole: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + result.Result, result.Changed, result.Error = ApplyClusterRole(ctx, clients.kubeClient.RbacV1(), recorder, t) + } + case *rbacv1.ClusterRoleBinding: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + result.Result, result.Changed, result.Error = ApplyClusterRoleBinding(ctx, clients.kubeClient.RbacV1(), recorder, t) + } + case *rbacv1.Role: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + result.Result, result.Changed, result.Error = ApplyRole(ctx, clients.kubeClient.RbacV1(), recorder, t) + } + case *rbacv1.RoleBinding: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + result.Result, result.Changed, result.Error = ApplyRoleBinding(ctx, clients.kubeClient.RbacV1(), recorder, t) + } + case *policyv1.PodDisruptionBudget: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + result.Result, result.Changed, result.Error = ApplyPodDisruptionBudget(ctx, clients.kubeClient.PolicyV1(), recorder, t) + } + case *apiextensionsv1.CustomResourceDefinition: + if 
clients.apiExtensionsClient == nil { + result.Error = fmt.Errorf("missing apiExtensionsClient") + } else { + result.Result, result.Changed, result.Error = ApplyCustomResourceDefinitionV1(ctx, clients.apiExtensionsClient.ApiextensionsV1(), recorder, t) + } + case *storagev1.StorageClass: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + result.Result, result.Changed, result.Error = ApplyStorageClass(ctx, clients.kubeClient.StorageV1(), recorder, t) + } + case *admissionregistrationv1.ValidatingWebhookConfiguration: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + result.Result, result.Changed, result.Error = ApplyValidatingWebhookConfigurationImproved(ctx, clients.kubeClient.AdmissionregistrationV1(), recorder, t, cache) + } + case *admissionregistrationv1.MutatingWebhookConfiguration: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + result.Result, result.Changed, result.Error = ApplyMutatingWebhookConfigurationImproved(ctx, clients.kubeClient.AdmissionregistrationV1(), recorder, t, cache) + } + case *storagev1.CSIDriver: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + result.Result, result.Changed, result.Error = ApplyCSIDriver(ctx, clients.kubeClient.StorageV1(), recorder, t) + } + case *migrationv1alpha1.StorageVersionMigration: + if clients.migrationClient == nil { + result.Error = fmt.Errorf("missing migrationClient") + } else { + result.Result, result.Changed, result.Error = ApplyStorageVersionMigration(ctx, clients.migrationClient, recorder, t) + } + case *unstructured.Unstructured: + if clients.dynamicClient == nil { + result.Error = fmt.Errorf("missing dynamicClient") + } else { + result.Result, result.Changed, result.Error = ApplyKnownUnstructured(ctx, clients.dynamicClient, recorder, t) + } + default: + result.Error = fmt.Errorf("unhandled type %T", requiredObj) + } + + ret = append(ret, result) + } + + return ret +} + +func DeleteAll(ctx context.Context, clients *ClientHolder, recorder events.Recorder, manifests AssetFunc, + files ...string) []ApplyResult { + ret := []ApplyResult{} + + for _, file := range files { + result := ApplyResult{File: file} + objBytes, err := manifests(file) + if err != nil { + result.Error = fmt.Errorf("missing %q: %v", file, err) + ret = append(ret, result) + continue + } + requiredObj, err := resourceread.ReadGenericWithUnstructured(objBytes) + if err != nil { + result.Error = fmt.Errorf("cannot decode %q: %v", file, err) + ret = append(ret, result) + continue + } + result.Type = fmt.Sprintf("%T", requiredObj) + // NOTE: Do not add CR resources into this switch otherwise the protobuf client can cause problems. 
+ switch t := requiredObj.(type) { + case *corev1.Namespace: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + _, result.Changed, result.Error = DeleteNamespace(ctx, clients.kubeClient.CoreV1(), recorder, t) + } + case *corev1.Service: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + _, result.Changed, result.Error = DeleteService(ctx, clients.kubeClient.CoreV1(), recorder, t) + } + case *corev1.Pod: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + _, result.Changed, result.Error = DeletePod(ctx, clients.kubeClient.CoreV1(), recorder, t) + } + case *corev1.ServiceAccount: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + _, result.Changed, result.Error = DeleteServiceAccount(ctx, clients.kubeClient.CoreV1(), recorder, t) + } + case *corev1.ConfigMap: + client := clients.configMapsGetter() + if client == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + _, result.Changed, result.Error = DeleteConfigMap(ctx, client, recorder, t) + } + case *corev1.Secret: + client := clients.secretsGetter() + if client == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + _, result.Changed, result.Error = DeleteSecret(ctx, client, recorder, t) + } + case *rbacv1.ClusterRole: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + _, result.Changed, result.Error = DeleteClusterRole(ctx, clients.kubeClient.RbacV1(), recorder, t) + } + case *rbacv1.ClusterRoleBinding: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + _, result.Changed, result.Error = DeleteClusterRoleBinding(ctx, clients.kubeClient.RbacV1(), recorder, t) + } + case *rbacv1.Role: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + _, result.Changed, result.Error = DeleteRole(ctx, clients.kubeClient.RbacV1(), recorder, t) + } + case *rbacv1.RoleBinding: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + _, result.Changed, result.Error = DeleteRoleBinding(ctx, clients.kubeClient.RbacV1(), recorder, t) + } + case *policyv1.PodDisruptionBudget: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + _, result.Changed, result.Error = DeletePodDisruptionBudget(ctx, clients.kubeClient.PolicyV1(), recorder, t) + } + case *apiextensionsv1.CustomResourceDefinition: + if clients.apiExtensionsClient == nil { + result.Error = fmt.Errorf("missing apiExtensionsClient") + } else { + _, result.Changed, result.Error = DeleteCustomResourceDefinitionV1(ctx, clients.apiExtensionsClient.ApiextensionsV1(), recorder, t) + } + case *storagev1.StorageClass: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + _, result.Changed, result.Error = DeleteStorageClass(ctx, clients.kubeClient.StorageV1(), recorder, t) + } + case *storagev1.CSIDriver: + if clients.kubeClient == nil { + result.Error = fmt.Errorf("missing kubeClient") + } else { + _, result.Changed, result.Error = DeleteCSIDriver(ctx, clients.kubeClient.StorageV1(), recorder, t) + } + case *migrationv1alpha1.StorageVersionMigration: + if clients.migrationClient == nil { + result.Error = fmt.Errorf("missing migrationClient") + } else { + _, result.Changed, result.Error = DeleteStorageVersionMigration(ctx, clients.migrationClient, recorder, t) + 
}
+		case *unstructured.Unstructured:
+			if clients.dynamicClient == nil {
+				result.Error = fmt.Errorf("missing dynamicClient")
+			} else {
+				_, result.Changed, result.Error = DeleteKnownUnstructured(ctx, clients.dynamicClient, recorder, t)
+			}
+		default:
+			result.Error = fmt.Errorf("unhandled type %T", requiredObj)
+		}
+
+		ret = append(ret, result)
+	}
+
+	return ret
+}
+
+func (c *ClientHolder) configMapsGetter() corev1client.ConfigMapsGetter {
+	if c.kubeClient == nil {
+		return nil
+	}
+	if c.kubeInformers == nil {
+		return c.kubeClient.CoreV1()
+	}
+	return v1helpers.CachedConfigMapGetter(c.kubeClient.CoreV1(), c.kubeInformers)
+}
+
+func (c *ClientHolder) secretsGetter() corev1client.SecretsGetter {
+	if c.kubeClient == nil {
+		return nil
+	}
+	if c.kubeInformers == nil {
+		return c.kubeClient.CoreV1()
+	}
+	return v1helpers.CachedSecretGetter(c.kubeClient.CoreV1(), c.kubeInformers)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go
new file mode 100644
index 000000000..ac9699aff
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/json_patch_helpers.go
@@ -0,0 +1,70 @@
+package resourceapply
+
+import (
+	"fmt"
+
+	patch "github.com/evanphx/json-patch"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/equality"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// JSONPatchNoError generates a JSON patch between the original and modified objects and returns the JSON as a string.
+//
+// Note:
+// In case of error, the returned string will contain the error messages.
+func JSONPatchNoError(original, modified runtime.Object) string {
+	if original == nil {
+		return "original object is nil"
+	}
+	if modified == nil {
+		return "modified object is nil"
+	}
+	originalJSON, err := runtime.Encode(unstructured.UnstructuredJSONScheme, original)
+	if err != nil {
+		return fmt.Sprintf("unable to encode original to JSON: %v", err)
+	}
+	modifiedJSON, err := runtime.Encode(unstructured.UnstructuredJSONScheme, modified)
+	if err != nil {
+		return fmt.Sprintf("unable to encode modified to JSON: %v", err)
+	}
+	patchBytes, err := patch.CreateMergePatch(originalJSON, modifiedJSON)
+	if err != nil {
+		return fmt.Sprintf("unable to create JSON patch: %v", err)
+	}
+	return string(patchBytes)
+}
+
+// JSONPatchSecretNoError generates a JSON patch between the original and modified secrets, hiding their data,
+// and returns the JSON as a string.
+//
+// Note:
+// In case of error, the returned string will contain the error messages.
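+//
+// Illustrative behaviour: keys unchanged between the two secrets are rendered as "OLD", keys absent
+// from original as "NEW", and keys with changed values as "MODIFIED", so the rendered patch never
+// contains actual secret payloads. A caller-side sketch (names assumed):
+//
+//	klog.Infof("Secret changes: %v", JSONPatchSecretNoError(existing, desired))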
+func JSONPatchSecretNoError(original, modified *corev1.Secret) string { + if original == nil { + return "original object is nil" + } + if modified == nil { + return "modified object is nil" + } + + safeModified := modified.DeepCopy() + safeOriginal := original.DeepCopy() + + for s := range safeOriginal.Data { + safeOriginal.Data[s] = []byte("OLD") + } + for s := range safeModified.Data { + if _, preoriginal := original.Data[s]; !preoriginal { + safeModified.Data[s] = []byte("NEW") + } else if !equality.Semantic.DeepEqual(original.Data[s], safeModified.Data[s]) { + safeModified.Data[s] = []byte("MODIFIED") + } else { + safeModified.Data[s] = []byte("OLD") + } + } + + return JSONPatchNoError(safeOriginal, safeModified) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/migration.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/migration.go new file mode 100644 index 000000000..d6df1f589 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/migration.go @@ -0,0 +1,59 @@ +package resourceapply + +import ( + "context" + "reflect" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" + "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1" + migrationv1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1" + migrationclientv1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset" +) + +// ApplyStorageVersionMigration merges objectmeta and required data. +func ApplyStorageVersionMigration(ctx context.Context, client migrationclientv1alpha1.Interface, recorder events.Recorder, required *migrationv1alpha1.StorageVersionMigration) (*migrationv1alpha1.StorageVersionMigration, bool, error) { + clientInterface := client.MigrationV1alpha1().StorageVersionMigrations() + existing, err := clientInterface.Get(ctx, required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + requiredCopy := required.DeepCopy() + actual, err := clientInterface.Create(ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*v1alpha1.StorageVersionMigration), metav1.CreateOptions{}) + reportCreateEvent(recorder, requiredCopy, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + if !*modified && reflect.DeepEqual(existingCopy.Spec, required.Spec) { + return existingCopy, false, nil + } + + if klog.V(4).Enabled() { + klog.Infof("StorageVersionMigration %q changes: %v", required.Name, JSONPatchNoError(existing, required)) + } + + required.Spec.Resource.DeepCopyInto(&existingCopy.Spec.Resource) + actual, err := clientInterface.Update(ctx, existingCopy, metav1.UpdateOptions{}) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} + +func DeleteStorageVersionMigration(ctx context.Context, client migrationclientv1alpha1.Interface, recorder events.Recorder, required *migrationv1alpha1.StorageVersionMigration) (*migrationv1alpha1.StorageVersionMigration, bool, error) { + clientInterface := client.MigrationV1alpha1().StorageVersionMigrations() + err := clientInterface.Delete(ctx, required.Name, metav1.DeleteOptions{}) + if err != nil && 
apierrors.IsNotFound(err) {
+		return nil, false, nil
+	}
+	if err != nil {
+		return nil, false, err
+	}
+	reportDeleteEvent(recorder, required, err)
+	return nil, true, nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/monitoring.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/monitoring.go
new file mode 100644
index 000000000..98ad5b0df
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/monitoring.go
@@ -0,0 +1,168 @@
+package resourceapply
+
+import (
+	"context"
+
+	"github.com/openshift/library-go/pkg/operator/events"
+	"k8s.io/apimachinery/pkg/api/equality"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/dynamic"
+	"k8s.io/klog/v2"
+)
+
+var serviceMonitorGVR = schema.GroupVersionResource{Group: "monitoring.coreos.com", Version: "v1", Resource: "servicemonitors"}
+
+func ensureGenericSpec(required, existing *unstructured.Unstructured, mimicDefaultingFn mimicDefaultingFunc, equalityChecker equalityChecker) (*unstructured.Unstructured, bool, error) {
+	requiredCopy := required.DeepCopy()
+	mimicDefaultingFn(requiredCopy)
+	requiredSpec, _, err := unstructured.NestedMap(requiredCopy.UnstructuredContent(), "spec")
+	if err != nil {
+		return nil, false, err
+	}
+	existingSpec, _, err := unstructured.NestedMap(existing.UnstructuredContent(), "spec")
+	if err != nil {
+		return nil, false, err
+	}
+
+	if equalityChecker.DeepEqual(existingSpec, requiredSpec) {
+		return existing, false, nil
+	}
+
+	existingCopy := existing.DeepCopy()
+	if err := unstructured.SetNestedMap(existingCopy.UnstructuredContent(), requiredSpec, "spec"); err != nil {
+		return nil, true, err
+	}
+
+	return existingCopy, true, nil
+}
+
+// mimicDefaultingFunc is used to set fields that are defaulted. This allows sparse manifests to apply correctly.
+// For instance, if field .spec.foo defaults to 10 when not set, a function of this type can set the field to 10
+// before the comparison. This is sometimes (often?) easier than updating the semantic equality.
+// We often see this in places like RBAC and CRD. Logically it can happen generically too.
+type mimicDefaultingFunc func(obj *unstructured.Unstructured)
+
+func noDefaulting(obj *unstructured.Unstructured) {}
+
+// equalityChecker allows for custom equality comparisons. This can be used to allow equality checks to skip certain
+// operator-managed fields. This capability allows something like .spec.scale to be specified or changed by a component
+// like HPA. Use this capability sparingly. Most places ought to just use `equality.Semantic`.
+type equalityChecker interface {
+	DeepEqual(a1, a2 interface{}) bool
+}
+
+// ApplyServiceMonitor applies the Prometheus service monitor.
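+//
+// Caller-side sketch (asset helper and manifest name assumed for illustration; not part of this
+// vendored file):
+//
+//	raw, _ := assets.Asset("manifests/servicemonitor.yaml")
+//	obj, _ := resourceread.ReadGenericWithUnstructured(raw)
+//	sm, _ := obj.(*unstructured.Unstructured)
+//	_, changed, err := ApplyServiceMonitor(ctx, dynamicClient, recorder, sm)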
+func ApplyServiceMonitor(ctx context.Context, client dynamic.Interface, recorder events.Recorder, required *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) { + namespace := required.GetNamespace() + existing, err := client.Resource(serviceMonitorGVR).Namespace(namespace).Get(ctx, required.GetName(), metav1.GetOptions{}) + if errors.IsNotFound(err) { + newObj, createErr := client.Resource(serviceMonitorGVR).Namespace(namespace).Create(ctx, required, metav1.CreateOptions{}) + if createErr != nil { + recorder.Warningf("ServiceMonitorCreateFailed", "Failed to create ServiceMonitor.monitoring.coreos.com/v1: %v", createErr) + return nil, true, createErr + } + recorder.Eventf("ServiceMonitorCreated", "Created ServiceMonitor.monitoring.coreos.com/v1 because it was missing") + return newObj, true, nil + } + if err != nil { + return nil, false, err + } + + existingCopy := existing.DeepCopy() + + toUpdate, modified, err := ensureGenericSpec(required, existingCopy, noDefaulting, equality.Semantic) + if err != nil { + return nil, false, err + } + + if !modified { + return nil, false, nil + } + + if klog.V(4).Enabled() { + klog.Infof("ServiceMonitor %q changes: %v", namespace+"/"+required.GetName(), JSONPatchNoError(existing, toUpdate)) + } + + newObj, err := client.Resource(serviceMonitorGVR).Namespace(namespace).Update(ctx, toUpdate, metav1.UpdateOptions{}) + if err != nil { + recorder.Warningf("ServiceMonitorUpdateFailed", "Failed to update ServiceMonitor.monitoring.coreos.com/v1: %v", err) + return nil, true, err + } + + recorder.Eventf("ServiceMonitorUpdated", "Updated ServiceMonitor.monitoring.coreos.com/v1 because it changed") + return newObj, true, err +} + +var prometheusRuleGVR = schema.GroupVersionResource{Group: "monitoring.coreos.com", Version: "v1", Resource: "prometheusrules"} + +// ApplyPrometheusRule applies the PrometheusRule +func ApplyPrometheusRule(ctx context.Context, client dynamic.Interface, recorder events.Recorder, required *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) { + namespace := required.GetNamespace() + + existing, err := client.Resource(prometheusRuleGVR).Namespace(namespace).Get(ctx, required.GetName(), metav1.GetOptions{}) + if errors.IsNotFound(err) { + newObj, createErr := client.Resource(prometheusRuleGVR).Namespace(namespace).Create(ctx, required, metav1.CreateOptions{}) + if createErr != nil { + recorder.Warningf("PrometheusRuleCreateFailed", "Failed to create PrometheusRule.monitoring.coreos.com/v1: %v", createErr) + return nil, true, createErr + } + recorder.Eventf("PrometheusRuleCreated", "Created PrometheusRule.monitoring.coreos.com/v1 because it was missing") + return newObj, true, nil + } + if err != nil { + return nil, false, err + } + + existingCopy := existing.DeepCopy() + + toUpdate, modified, err := ensureGenericSpec(required, existingCopy, noDefaulting, equality.Semantic) + if err != nil { + return nil, false, err + } + + if !modified { + return nil, false, nil + } + + if klog.V(4).Enabled() { + klog.Infof("PrometheusRule %q changes: %v", namespace+"/"+required.GetName(), JSONPatchNoError(existing, toUpdate)) + } + + newObj, err := client.Resource(prometheusRuleGVR).Namespace(namespace).Update(ctx, toUpdate, metav1.UpdateOptions{}) + if err != nil { + recorder.Warningf("PrometheusRuleUpdateFailed", "Failed to update PrometheusRule.monitoring.coreos.com/v1: %v", err) + return nil, true, err + } + + recorder.Eventf("PrometheusRuleUpdated", "Updated PrometheusRule.monitoring.coreos.com/v1 because it 
changed") + return newObj, true, err +} + +func DeletePrometheusRule(ctx context.Context, client dynamic.Interface, recorder events.Recorder, required *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) { + namespace := required.GetNamespace() + err := client.Resource(prometheusRuleGVR).Namespace(namespace).Delete(ctx, required.GetName(), metav1.DeleteOptions{}) + if err != nil && errors.IsNotFound(err) { + return nil, false, nil + } + if err != nil { + return nil, false, err + } + reportDeleteEvent(recorder, required, err) + return nil, true, nil +} + +func DeleteServiceMonitor(ctx context.Context, client dynamic.Interface, recorder events.Recorder, required *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) { + namespace := required.GetNamespace() + err := client.Resource(serviceMonitorGVR).Namespace(namespace).Delete(ctx, required.GetName(), metav1.DeleteOptions{}) + if err != nil && errors.IsNotFound(err) { + return nil, false, nil + } + if err != nil { + return nil, false, err + } + reportDeleteEvent(recorder, required, err) + return nil, true, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/policy.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/policy.go new file mode 100644 index 000000000..5bfe3b389 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/policy.go @@ -0,0 +1,60 @@ +package resourceapply + +import ( + "context" + + policyv1 "k8s.io/api/policy/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + policyclientv1 "k8s.io/client-go/kubernetes/typed/policy/v1" + "k8s.io/klog/v2" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" +) + +func ApplyPodDisruptionBudget(ctx context.Context, client policyclientv1.PodDisruptionBudgetsGetter, recorder events.Recorder, required *policyv1.PodDisruptionBudget) (*policyv1.PodDisruptionBudget, bool, error) { + existing, err := client.PodDisruptionBudgets(required.Namespace).Get(ctx, required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + requiredCopy := required.DeepCopy() + actual, err := client.PodDisruptionBudgets(required.Namespace).Create( + ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*policyv1.PodDisruptionBudget), metav1.CreateOptions{}) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + contentSame := equality.Semantic.DeepEqual(existingCopy.Spec, required.Spec) + if contentSame && !*modified { + return existingCopy, false, nil + } + + existingCopy.Spec = required.Spec + + if klog.V(4).Enabled() { + klog.Infof("PodDisruptionBudget %q changes: %v", required.Name, JSONPatchNoError(existing, existingCopy)) + } + + actual, err := client.PodDisruptionBudgets(required.Namespace).Update(ctx, existingCopy, metav1.UpdateOptions{}) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} + +func DeletePodDisruptionBudget(ctx context.Context, client policyclientv1.PodDisruptionBudgetsGetter, recorder events.Recorder, required *policyv1.PodDisruptionBudget) (*policyv1.PodDisruptionBudget, bool, error) { + err := 
client.PodDisruptionBudgets(required.Namespace).Delete(ctx, required.Name, metav1.DeleteOptions{}) + if err != nil && apierrors.IsNotFound(err) { + return nil, false, nil + } + if err != nil { + return nil, false, err + } + reportDeleteEvent(recorder, required, err) + return nil, true, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/rbac.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/rbac.go new file mode 100644 index 000000000..0e378edd2 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/rbac.go @@ -0,0 +1,246 @@ +package resourceapply + +import ( + "context" + "fmt" + + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + rbacclientv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" + "k8s.io/klog/v2" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" +) + +// ApplyClusterRole merges objectmeta, requires rules, aggregation rules are not allowed for now. +func ApplyClusterRole(ctx context.Context, client rbacclientv1.ClusterRolesGetter, recorder events.Recorder, required *rbacv1.ClusterRole) (*rbacv1.ClusterRole, bool, error) { + if required.AggregationRule != nil && len(required.AggregationRule.ClusterRoleSelectors) != 0 { + return nil, false, fmt.Errorf("cannot create an aggregated cluster role") + } + + existing, err := client.ClusterRoles().Get(ctx, required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + requiredCopy := required.DeepCopy() + actual, err := client.ClusterRoles().Create( + ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*rbacv1.ClusterRole), metav1.CreateOptions{}) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + contentSame := equality.Semantic.DeepEqual(existingCopy.Rules, required.Rules) + if contentSame && !*modified { + return existingCopy, false, nil + } + + existingCopy.Rules = required.Rules + existingCopy.AggregationRule = nil + + if klog.V(4).Enabled() { + klog.Infof("ClusterRole %q changes: %v", required.Name, JSONPatchNoError(existing, existingCopy)) + } + + actual, err := client.ClusterRoles().Update(ctx, existingCopy, metav1.UpdateOptions{}) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} + +// ApplyClusterRoleBinding merges objectmeta, requires subjects and role refs +// TODO on non-matching roleref, delete and recreate +func ApplyClusterRoleBinding(ctx context.Context, client rbacclientv1.ClusterRoleBindingsGetter, recorder events.Recorder, required *rbacv1.ClusterRoleBinding) (*rbacv1.ClusterRoleBinding, bool, error) { + existing, err := client.ClusterRoleBindings().Get(ctx, required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + requiredCopy := required.DeepCopy() + actual, err := client.ClusterRoleBindings().Create( + ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*rbacv1.ClusterRoleBinding), metav1.CreateOptions{}) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := 
existing.DeepCopy() + requiredCopy := required.DeepCopy() + + // Enforce apiGroup fields in roleRefs + existingCopy.RoleRef.APIGroup = rbacv1.GroupName + for i := range existingCopy.Subjects { + if existingCopy.Subjects[i].Kind == "User" { + existingCopy.Subjects[i].APIGroup = rbacv1.GroupName + } + } + + requiredCopy.RoleRef.APIGroup = rbacv1.GroupName + for i := range requiredCopy.Subjects { + if requiredCopy.Subjects[i].Kind == "User" { + requiredCopy.Subjects[i].APIGroup = rbacv1.GroupName + } + } + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, requiredCopy.ObjectMeta) + + subjectsAreSame := equality.Semantic.DeepEqual(existingCopy.Subjects, requiredCopy.Subjects) + roleRefIsSame := equality.Semantic.DeepEqual(existingCopy.RoleRef, requiredCopy.RoleRef) + + if subjectsAreSame && roleRefIsSame && !*modified { + return existingCopy, false, nil + } + + existingCopy.Subjects = requiredCopy.Subjects + existingCopy.RoleRef = requiredCopy.RoleRef + + if klog.V(4).Enabled() { + klog.Infof("ClusterRoleBinding %q changes: %v", requiredCopy.Name, JSONPatchNoError(existing, existingCopy)) + } + + actual, err := client.ClusterRoleBindings().Update(ctx, existingCopy, metav1.UpdateOptions{}) + reportUpdateEvent(recorder, requiredCopy, err) + return actual, true, err +} + +// ApplyRole merges objectmeta, requires rules +func ApplyRole(ctx context.Context, client rbacclientv1.RolesGetter, recorder events.Recorder, required *rbacv1.Role) (*rbacv1.Role, bool, error) { + existing, err := client.Roles(required.Namespace).Get(ctx, required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + requiredCopy := required.DeepCopy() + actual, err := client.Roles(required.Namespace).Create( + ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*rbacv1.Role), metav1.CreateOptions{}) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + contentSame := equality.Semantic.DeepEqual(existingCopy.Rules, required.Rules) + if contentSame && !*modified { + return existingCopy, false, nil + } + + existingCopy.Rules = required.Rules + + if klog.V(4).Enabled() { + klog.Infof("Role %q changes: %v", required.Namespace+"/"+required.Name, JSONPatchNoError(existing, existingCopy)) + } + actual, err := client.Roles(required.Namespace).Update(ctx, existingCopy, metav1.UpdateOptions{}) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} + +// ApplyRoleBinding merges objectmeta, requires subjects and role refs +// TODO on non-matching roleref, delete and recreate +func ApplyRoleBinding(ctx context.Context, client rbacclientv1.RoleBindingsGetter, recorder events.Recorder, required *rbacv1.RoleBinding) (*rbacv1.RoleBinding, bool, error) { + existing, err := client.RoleBindings(required.Namespace).Get(ctx, required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + requiredCopy := required.DeepCopy() + actual, err := client.RoleBindings(required.Namespace).Create( + ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*rbacv1.RoleBinding), metav1.CreateOptions{}) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + requiredCopy := required.DeepCopy() + + // 
Enforce apiGroup fields in roleRefs and subjects + existingCopy.RoleRef.APIGroup = rbacv1.GroupName + for i := range existingCopy.Subjects { + if existingCopy.Subjects[i].Kind == "User" { + existingCopy.Subjects[i].APIGroup = rbacv1.GroupName + } + } + + requiredCopy.RoleRef.APIGroup = rbacv1.GroupName + for i := range requiredCopy.Subjects { + if requiredCopy.Subjects[i].Kind == "User" { + requiredCopy.Subjects[i].APIGroup = rbacv1.GroupName + } + } + + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, requiredCopy.ObjectMeta) + + subjectsAreSame := equality.Semantic.DeepEqual(existingCopy.Subjects, requiredCopy.Subjects) + roleRefIsSame := equality.Semantic.DeepEqual(existingCopy.RoleRef, requiredCopy.RoleRef) + + if subjectsAreSame && roleRefIsSame && !*modified { + return existingCopy, false, nil + } + + existingCopy.Subjects = requiredCopy.Subjects + existingCopy.RoleRef = requiredCopy.RoleRef + + if klog.V(4).Enabled() { + klog.Infof("RoleBinding %q changes: %v", requiredCopy.Namespace+"/"+requiredCopy.Name, JSONPatchNoError(existing, existingCopy)) + } + + actual, err := client.RoleBindings(requiredCopy.Namespace).Update(ctx, existingCopy, metav1.UpdateOptions{}) + reportUpdateEvent(recorder, requiredCopy, err) + return actual, true, err +} + +func DeleteClusterRole(ctx context.Context, client rbacclientv1.ClusterRolesGetter, recorder events.Recorder, required *rbacv1.ClusterRole) (*rbacv1.ClusterRole, bool, error) { + err := client.ClusterRoles().Delete(ctx, required.Name, metav1.DeleteOptions{}) + if err != nil && apierrors.IsNotFound(err) { + return nil, false, nil + } + if err != nil { + return nil, false, err + } + reportDeleteEvent(recorder, required, err) + return nil, true, nil +} + +func DeleteClusterRoleBinding(ctx context.Context, client rbacclientv1.ClusterRoleBindingsGetter, recorder events.Recorder, required *rbacv1.ClusterRoleBinding) (*rbacv1.ClusterRoleBinding, bool, error) { + err := client.ClusterRoleBindings().Delete(ctx, required.Name, metav1.DeleteOptions{}) + if err != nil && apierrors.IsNotFound(err) { + return nil, false, nil + } + if err != nil { + return nil, false, err + } + reportDeleteEvent(recorder, required, err) + return nil, true, nil +} + +func DeleteRole(ctx context.Context, client rbacclientv1.RolesGetter, recorder events.Recorder, required *rbacv1.Role) (*rbacv1.Role, bool, error) { + err := client.Roles(required.Namespace).Delete(ctx, required.Name, metav1.DeleteOptions{}) + if err != nil && apierrors.IsNotFound(err) { + return nil, false, nil + } + if err != nil { + return nil, false, err + } + reportDeleteEvent(recorder, required, err) + return nil, true, nil +} + +func DeleteRoleBinding(ctx context.Context, client rbacclientv1.RoleBindingsGetter, recorder events.Recorder, required *rbacv1.RoleBinding) (*rbacv1.RoleBinding, bool, error) { + err := client.RoleBindings(required.Namespace).Delete(ctx, required.Name, metav1.DeleteOptions{}) + if err != nil && apierrors.IsNotFound(err) { + return nil, false, nil + } + if err != nil { + return nil, false, err + } + reportDeleteEvent(recorder, required, err) + return nil, true, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/resource_cache.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/resource_cache.go new file mode 100644 index 000000000..daa1a5e15 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/resource_cache.go @@ -0,0 +1,168 @@ +package resourceapply + 
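+// A minimal usage sketch of the cache below (doUpdate is a hypothetical
+// helper standing in for a real client Update call; it is not part of this
+// package):
+//
+//	cache := NewResourceCache()
+//	if !cache.SafeToSkipApply(required, existing) {
+//		actual, err := doUpdate(ctx, existing) // assumed helper
+//		if err == nil {
+//			cache.UpdateCachedResourceMetadata(required, actual)
+//		}
+//	}
+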
+import (
+	"crypto/md5"
+	"fmt"
+	"io"
+	"reflect"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/klog/v2"
+)
+
+type cachedVersionKey struct {
+	name      string
+	namespace string
+	kind      schema.GroupKind
+}
+
+// record of resource metadata used to determine if it's safe to return early from an ApplyFoo
+// resourceHash is an md5 hash of the required in an ApplyFoo that is computed in case the input changes
+// resourceVersion is the received resourceVersion from the apiserver in response to an update that is comparable to the GET
+type cachedResource struct {
+	resourceHash, resourceVersion string
+}
+
+type resourceCache struct {
+	cache map[cachedVersionKey]cachedResource
+}
+
+type ResourceCache interface {
+	UpdateCachedResourceMetadata(required runtime.Object, actual runtime.Object)
+	SafeToSkipApply(required runtime.Object, existing runtime.Object) bool
+}
+
+func NewResourceCache() *resourceCache {
+	return &resourceCache{
+		cache: map[cachedVersionKey]cachedResource{},
+	}
+}
+
+var noCache *resourceCache
+
+func getResourceMetadata(obj runtime.Object) (schema.GroupKind, string, string, string, error) {
+	if obj == nil {
+		return schema.GroupKind{}, "", "", "", fmt.Errorf("nil object has no metadata")
+	}
+	metadata, err := meta.Accessor(obj)
+	if err != nil {
+		return schema.GroupKind{}, "", "", "", err
+	}
+	if metadata == nil || reflect.ValueOf(metadata).IsNil() {
+		return schema.GroupKind{}, "", "", "", fmt.Errorf("object has no metadata")
+	}
+	resourceHash := hashOfResourceStruct(obj)
+
+	// retrieve kind, sometimes this can be done via the accessor, sometimes not (depends on the type)
+	kind := schema.GroupKind{}
+	gvk := obj.GetObjectKind().GroupVersionKind()
+	if len(gvk.Kind) > 0 {
+		kind = gvk.GroupKind()
+	} else {
+		if currKind := getCoreGroupKind(obj); currKind != nil {
+			kind = *currKind
+		}
+	}
+	if len(kind.Kind) == 0 {
+		return schema.GroupKind{}, "", "", "", fmt.Errorf("unable to determine GroupKind of %T", obj)
+	}
+
+	return kind, metadata.GetName(), metadata.GetNamespace(), resourceHash, nil
+}
+
+func getResourceVersion(obj runtime.Object) (string, error) {
+	if obj == nil {
+		return "", fmt.Errorf("nil object has no resourceVersion")
+	}
+	metadata, err := meta.Accessor(obj)
+	if err != nil {
+		return "", err
+	}
+	if metadata == nil || reflect.ValueOf(metadata).IsNil() {
+		return "", fmt.Errorf("object has no metadata")
+	}
+	rv := metadata.GetResourceVersion()
+	if len(rv) == 0 {
+		return "", fmt.Errorf("missing resourceVersion")
+	}
+
+	return rv, nil
+}
+
+func (c *resourceCache) UpdateCachedResourceMetadata(required runtime.Object, actual runtime.Object) {
+	if c == nil || c.cache == nil {
+		return
+	}
+	if required == nil || actual == nil {
+		return
+	}
+	kind, name, namespace, resourceHash, err := getResourceMetadata(required)
+	if err != nil {
+		return
+	}
+	cacheKey := cachedVersionKey{
+		name:      name,
+		namespace: namespace,
+		kind:      kind,
+	}
+
+	resourceVersion, err := getResourceVersion(actual)
+	if err != nil {
+		klog.V(4).Infof("error reading resourceVersion %s:%s:%s %s", name, kind, namespace, err)
+		return
+	}
+
+	c.cache[cacheKey] = cachedResource{resourceHash, resourceVersion}
+	klog.V(7).Infof("updated resourceVersion of %s:%s:%s %s", name, kind, namespace, resourceVersion)
+}
+
+// in the circumstance that an ApplyFoo's 'required' is the same one which was previously
+// applied for a given (name, kind, namespace) and the existing resource (if any),
+// hasn't
been modified since the ApplyFoo last updated that resource, then return true (we don't +// need to reapply the resource). Otherwise return false. +func (c *resourceCache) SafeToSkipApply(required runtime.Object, existing runtime.Object) bool { + if c == nil || c.cache == nil { + return false + } + if required == nil || existing == nil { + return false + } + kind, name, namespace, resourceHash, err := getResourceMetadata(required) + if err != nil { + return false + } + cacheKey := cachedVersionKey{ + name: name, + namespace: namespace, + kind: kind, + } + + resourceVersion, err := getResourceVersion(existing) + if err != nil { + return false + } + + var versionMatch, hashMatch bool + if cached, exists := c.cache[cacheKey]; exists { + versionMatch = cached.resourceVersion == resourceVersion + hashMatch = cached.resourceHash == resourceHash + if versionMatch && hashMatch { + klog.V(4).Infof("found matching resourceVersion & manifest hash") + return true + } + } + + return false +} + +// detect changes in a resource by caching a hash of the string representation of the resource +// note: some changes in a resource e.g. nil vs empty, will not be detected this way +func hashOfResourceStruct(o interface{}) string { + oString := fmt.Sprintf("%v", o) + h := md5.New() + io.WriteString(h, oString) + rval := fmt.Sprintf("%x", h.Sum(nil)) + return rval +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go new file mode 100644 index 000000000..9bf7c38c5 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/storage.go @@ -0,0 +1,259 @@ +package resourceapply + +import ( + "context" + "fmt" + + storagev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/equality" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + storageclientv1 "k8s.io/client-go/kubernetes/typed/storage/v1" + "k8s.io/klog/v2" + + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/resource/resourcemerge" +) + +const ( + // Label on the CSIDriver to declare the driver's effective pod security profile + csiInlineVolProfileLabel = "security.openshift.io/csi-ephemeral-volume-profile" + + defaultScAnnotationKey = "storageclass.kubernetes.io/is-default-class" +) + +var ( + // Exempt labels are not overwritten if the value has changed + exemptCSIDriverLabels = []string{ + csiInlineVolProfileLabel, + } +) + +// ApplyStorageClass merges objectmeta, tries to write everything else +func ApplyStorageClass(ctx context.Context, client storageclientv1.StorageClassesGetter, recorder events.Recorder, required *storagev1.StorageClass) (*storagev1.StorageClass, bool, + error) { + existing, err := client.StorageClasses().Get(ctx, required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + requiredCopy := required.DeepCopy() + actual, err := client.StorageClasses().Create( + ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*storagev1.StorageClass), metav1.CreateOptions{}) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + if required.ObjectMeta.ResourceVersion != "" && required.ObjectMeta.ResourceVersion != existing.ObjectMeta.ResourceVersion { + err = fmt.Errorf("rejected to update StorageClass %s because the object has been modified: desired/actual ResourceVersion: %v/%v", + 
required.Name, required.ObjectMeta.ResourceVersion, existing.ObjectMeta.ResourceVersion) + return nil, false, err + } + // Our caller may not be able to set required.ObjectMeta.ResourceVersion. We only want to overwrite value of + // default storage class annotation if it is missing in existing.Annotations + if existing.Annotations != nil { + if _, ok := existing.Annotations[defaultScAnnotationKey]; ok { + if required.Annotations == nil { + required.Annotations = make(map[string]string) + } + required.Annotations[defaultScAnnotationKey] = existing.Annotations[defaultScAnnotationKey] + } + } + + // First, let's compare ObjectMeta from both objects + modified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + resourcemerge.EnsureObjectMeta(modified, &existingCopy.ObjectMeta, required.ObjectMeta) + + // Second, let's compare the other fields. StorageClass doesn't have a spec and we don't + // want to miss fields, so we have to copy required to get all fields + // and then overwrite ObjectMeta and TypeMeta from the original. + requiredCopy := required.DeepCopy() + requiredCopy.ObjectMeta = *existingCopy.ObjectMeta.DeepCopy() + requiredCopy.TypeMeta = existingCopy.TypeMeta + + contentSame := equality.Semantic.DeepEqual(existingCopy, requiredCopy) + if contentSame && !*modified { + return existing, false, nil + } + + if klog.V(4).Enabled() { + klog.Infof("StorageClass %q changes: %v", required.Name, JSONPatchNoError(existingCopy, requiredCopy)) + } + + if storageClassNeedsRecreate(existingCopy, requiredCopy) { + requiredCopy.ObjectMeta.ResourceVersion = "" + err = client.StorageClasses().Delete(ctx, existingCopy.Name, metav1.DeleteOptions{}) + reportDeleteEvent(recorder, requiredCopy, err, "Deleting StorageClass to re-create it with updated parameters") + if err != nil && !apierrors.IsNotFound(err) { + return existing, false, err + } + actual, err := client.StorageClasses().Create(ctx, requiredCopy, metav1.CreateOptions{}) + if err != nil && apierrors.IsAlreadyExists(err) { + // Delete() few lines above did not really delete the object, + // the API server is probably waiting for a finalizer removal or so. + // Report an error, but something else than "Already exists", because + // that would be very confusing - Apply failed because the object + // already exists??? + err = fmt.Errorf("failed to re-create StorageClass %s, waiting for the original object to be deleted", existingCopy.Name) + } else if err != nil { + err = fmt.Errorf("failed to re-create StorageClass %s: %s", existingCopy.Name, err) + } + reportCreateEvent(recorder, actual, err) + return actual, true, err + } + + // Only mutable fields need a change + actual, err := client.StorageClasses().Update(ctx, requiredCopy, metav1.UpdateOptions{}) + reportUpdateEvent(recorder, required, err) + return actual, true, err +} + +func storageClassNeedsRecreate(oldSC, newSC *storagev1.StorageClass) bool { + // Based on kubernetes/kubernetes/pkg/apis/storage/validation/validation.go, + // these fields are immutable. 
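+	// The checks below deliberately cover only the immutable fields this
+	// function inspects: Parameters, Provisioner, ReclaimPolicy and
+	// VolumeBindingMode. Mutable fields (for example AllowVolumeExpansion)
+	// fall through to the plain Update in ApplyStorageClass.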
+ if !equality.Semantic.DeepEqual(oldSC.Parameters, newSC.Parameters) { + return true + } + if oldSC.Provisioner != newSC.Provisioner { + return true + } + + // In theory, ReclaimPolicy is always set, just in case: + if (oldSC.ReclaimPolicy == nil && newSC.ReclaimPolicy != nil) || + (oldSC.ReclaimPolicy != nil && newSC.ReclaimPolicy == nil) { + return true + } + if oldSC.ReclaimPolicy != nil && newSC.ReclaimPolicy != nil && *oldSC.ReclaimPolicy != *newSC.ReclaimPolicy { + return true + } + + if !equality.Semantic.DeepEqual(oldSC.VolumeBindingMode, newSC.VolumeBindingMode) { + return true + } + return false +} + +// ApplyCSIDriver merges objectmeta, does not worry about anything else +func ApplyCSIDriver(ctx context.Context, client storageclientv1.CSIDriversGetter, recorder events.Recorder, requiredOriginal *storagev1.CSIDriver) (*storagev1.CSIDriver, bool, error) { + + required := requiredOriginal.DeepCopy() + if required.Annotations == nil { + required.Annotations = map[string]string{} + } + if err := SetSpecHashAnnotation(&required.ObjectMeta, required.Spec); err != nil { + return nil, false, err + } + if err := validateRequiredCSIDriverLabels(required); err != nil { + return nil, false, err + } + + existing, err := client.CSIDrivers().Get(ctx, required.Name, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + requiredCopy := required.DeepCopy() + actual, err := client.CSIDrivers().Create( + ctx, resourcemerge.WithCleanLabelsAndAnnotations(requiredCopy).(*storagev1.CSIDriver), metav1.CreateOptions{}) + reportCreateEvent(recorder, required, err) + return actual, true, err + } + if err != nil { + return nil, false, err + } + + // Exempt labels are not overwritten if the value has changed. They get set + // once during creation, but the admin may choose to set a different value. + // If the label is removed, it reverts back to the default value. + for _, exemptLabel := range exemptCSIDriverLabels { + if existingValue, ok := existing.Labels[exemptLabel]; ok { + required.Labels[exemptLabel] = existingValue + } + } + + metadataModified := resourcemerge.BoolPtr(false) + existingCopy := existing.DeepCopy() + resourcemerge.EnsureObjectMeta(metadataModified, &existingCopy.ObjectMeta, required.ObjectMeta) + + requiredSpecHash := required.Annotations[specHashAnnotation] + existingSpecHash := existing.Annotations[specHashAnnotation] + sameSpec := requiredSpecHash == existingSpecHash + if sameSpec && !*metadataModified { + return existing, false, nil + } + + if klog.V(4).Enabled() { + klog.Infof("CSIDriver %q changes: %v", required.Name, JSONPatchNoError(existing, existingCopy)) + } + + if sameSpec { + // Update metadata by a simple Update call + actual, err := client.CSIDrivers().Update(ctx, existingCopy, metav1.UpdateOptions{}) + reportUpdateEvent(recorder, required, err) + return actual, true, err + } + + existingCopy.Spec = required.Spec + existingCopy.ObjectMeta.ResourceVersion = "" + // Spec is read-only after creation. Delete and re-create the object + err = client.CSIDrivers().Delete(ctx, existingCopy.Name, metav1.DeleteOptions{}) + reportDeleteEvent(recorder, existingCopy, err, "Deleting CSIDriver to re-create it with updated parameters") + if err != nil && !apierrors.IsNotFound(err) { + return existing, false, err + } + actual, err := client.CSIDrivers().Create(ctx, existingCopy, metav1.CreateOptions{}) + if err != nil && apierrors.IsAlreadyExists(err) { + // Delete() few lines above did not really delete the object, + // the API server is probably waiting for a finalizer removal or so. 
+		// Report an error, but something else than "Already exists", because
+		// that would be very confusing - Apply failed because the object
+		// already exists???
+		err = fmt.Errorf("failed to re-create CSIDriver object %s, waiting for the original object to be deleted", existingCopy.Name)
+	} else if err != nil {
+		err = fmt.Errorf("failed to re-create CSIDriver %s: %s", existingCopy.Name, err)
+	}
+	reportCreateEvent(recorder, existingCopy, err)
+	return actual, true, err
+}
+
+func validateRequiredCSIDriverLabels(required *storagev1.CSIDriver) error {
+	supportsEphemeralVolumes := false
+	for _, mode := range required.Spec.VolumeLifecycleModes {
+		if mode == storagev1.VolumeLifecycleEphemeral {
+			supportsEphemeralVolumes = true
+			break
+		}
+	}
+	// All OCP managed CSI drivers that support the Ephemeral volume
+	// lifecycle mode must provide a profile label to be matched against
+	// the pod security policy for the namespace of the pod.
+	// Valid values are: restricted, baseline, privileged.
+	_, labelFound := required.Labels[csiInlineVolProfileLabel]
+	if supportsEphemeralVolumes && !labelFound {
+		return fmt.Errorf("CSIDriver %s supports Ephemeral volume lifecycle but is missing required label %s", required.Name, csiInlineVolProfileLabel)
+	}
+	return nil
+}
+
+func DeleteStorageClass(ctx context.Context, client storageclientv1.StorageClassesGetter, recorder events.Recorder, required *storagev1.StorageClass) (*storagev1.StorageClass, bool,
+	error) {
+	err := client.StorageClasses().Delete(ctx, required.Name, metav1.DeleteOptions{})
+	if err != nil && apierrors.IsNotFound(err) {
+		return nil, false, nil
+	}
+	if err != nil {
+		return nil, false, err
+	}
+	reportDeleteEvent(recorder, required, err)
+	return nil, true, nil
+}
+
+func DeleteCSIDriver(ctx context.Context, client storageclientv1.CSIDriversGetter, recorder events.Recorder, required *storagev1.CSIDriver) (*storagev1.CSIDriver, bool, error) {
+	err := client.CSIDrivers().Delete(ctx, required.Name, metav1.DeleteOptions{})
+	if err != nil && apierrors.IsNotFound(err) {
+		return nil, false, nil
+	}
+	if err != nil {
+		return nil, false, err
+	}
+	reportDeleteEvent(recorder, required, err)
+	return nil, true, nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/unstructured.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/unstructured.go
new file mode 100644
index 000000000..1adb01aee
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/unstructured.go
@@ -0,0 +1,42 @@
+package resourceapply
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/openshift/library-go/pkg/operator/events"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/dynamic"
+)
+
+// ApplyKnownUnstructured applies a few selected Unstructured types, where it has the semantic knowledge
+// to merge existing & required objects intelligently. Feel free to add more.
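+//
+// A minimal call sketch (ctx, dynamicClient and recorder are assumed to
+// already exist in the caller):
+//
+//	obj := &unstructured.Unstructured{} // e.g. a decoded ServiceMonitor manifest
+//	_, changed, err := ApplyKnownUnstructured(ctx, dynamicClient, recorder, obj)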
+func ApplyKnownUnstructured(ctx context.Context, client dynamic.Interface, recorder events.Recorder, obj *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) {
+	switch obj.GetObjectKind().GroupVersionKind().GroupKind() {
+	case schema.GroupKind{Group: "monitoring.coreos.com", Kind: "ServiceMonitor"}:
+		return ApplyServiceMonitor(ctx, client, recorder, obj)
+	case schema.GroupKind{Group: "monitoring.coreos.com", Kind: "PrometheusRule"}:
+		return ApplyPrometheusRule(ctx, client, recorder, obj)
+	case schema.GroupKind{Group: "snapshot.storage.k8s.io", Kind: "VolumeSnapshotClass"}:
+		return ApplyVolumeSnapshotClass(ctx, client, recorder, obj)
+
+	}
+
+	return nil, false, fmt.Errorf("unsupported object type: %s", obj.GetKind())
+}
+
+// DeleteKnownUnstructured deletes a few selected Unstructured types
+func DeleteKnownUnstructured(ctx context.Context, client dynamic.Interface, recorder events.Recorder, obj *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) {
+	switch obj.GetObjectKind().GroupVersionKind().GroupKind() {
+	case schema.GroupKind{Group: "monitoring.coreos.com", Kind: "ServiceMonitor"}:
+		return DeleteServiceMonitor(ctx, client, recorder, obj)
+	case schema.GroupKind{Group: "monitoring.coreos.com", Kind: "PrometheusRule"}:
+		return DeletePrometheusRule(ctx, client, recorder, obj)
+	case schema.GroupKind{Group: "snapshot.storage.k8s.io", Kind: "VolumeSnapshotClass"}:
+		return DeleteVolumeSnapshotClass(ctx, client, recorder, obj)
+
+	}
+
+	return nil, false, fmt.Errorf("unsupported object type: %s", obj.GetKind())
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/volumesnapshotclass.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/volumesnapshotclass.go
new file mode 100644
index 000000000..1a35b6d77
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceapply/volumesnapshotclass.go
@@ -0,0 +1,129 @@
+package resourceapply
+
+import (
+	"context"
+
+	"k8s.io/klog/v2"
+
+	"k8s.io/apimachinery/pkg/api/equality"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/dynamic"
+
+	"github.com/openshift/library-go/pkg/operator/events"
+)
+
+const (
+	VolumeSnapshotClassGroup    = "snapshot.storage.k8s.io"
+	VolumeSnapshotClassVersion  = "v1"
+	VolumeSnapshotClassResource = "volumesnapshotclasses"
+)
+
+var volumeSnapshotClassResourceGVR schema.GroupVersionResource = schema.GroupVersionResource{
+	Group:    VolumeSnapshotClassGroup,
+	Version:  VolumeSnapshotClassVersion,
+	Resource: VolumeSnapshotClassResource,
+}
+
+func ensureGenericVolumeSnapshotClass(required, existing *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) {
+	var existingCopy *unstructured.Unstructured
+
+	// Apply "parameters"
+	requiredParameters, _, err := unstructured.NestedMap(required.UnstructuredContent(), "parameters")
+	if err != nil {
+		return nil, false, err
+	}
+	existingParameters, _, err := unstructured.NestedMap(existing.UnstructuredContent(), "parameters")
+	if err != nil {
+		return nil, false, err
+	}
+	if !equality.Semantic.DeepEqual(existingParameters, requiredParameters) {
+		if existingCopy == nil {
+			existingCopy = existing.DeepCopy()
+		}
+		if err := unstructured.SetNestedMap(existingCopy.UnstructuredContent(), requiredParameters, "parameters"); err != nil {
+			return nil, true, err
+		}
+	}
+
+	// Apply
"driver" and "deletionPolicy" + for _, fieldName := range []string{"driver", "deletionPolicy"} { + requiredField, _, err := unstructured.NestedString(required.UnstructuredContent(), fieldName) + if err != nil { + return nil, false, err + } + existingField, _, err := unstructured.NestedString(existing.UnstructuredContent(), fieldName) + if err != nil { + return nil, false, err + } + if requiredField != existingField { + if existingCopy == nil { + existingCopy = existing.DeepCopy() + } + if err := unstructured.SetNestedField(existingCopy.UnstructuredContent(), requiredField, fieldName); err != nil { + return nil, true, err + } + } + } + + // If existingCopy is not nil, then the object has been modified + if existingCopy != nil { + return existingCopy, true, nil + } + + return existing, false, nil +} + +// ApplyVolumeSnapshotClass applies Volume Snapshot Class. +func ApplyVolumeSnapshotClass(ctx context.Context, client dynamic.Interface, recorder events.Recorder, required *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) { + existing, err := client.Resource(volumeSnapshotClassResourceGVR).Get(ctx, required.GetName(), metav1.GetOptions{}) + if errors.IsNotFound(err) { + newObj, createErr := client.Resource(volumeSnapshotClassResourceGVR).Create(ctx, required, metav1.CreateOptions{}) + if createErr != nil { + recorder.Warningf("VolumeSnapshotClassCreateFailed", "Failed to create VolumeSnapshotClass.snapshot.storage.k8s.io/v1: %v", createErr) + return nil, true, createErr + } + recorder.Eventf("VolumeSnapshotClassCreated", "Created VolumeSnapshotClass.snapshot.storage.k8s.io/v1 because it was missing") + return newObj, true, nil + } + if err != nil { + return nil, false, err + } + + toUpdate, modified, err := ensureGenericVolumeSnapshotClass(required, existing) + if err != nil { + return nil, false, err + } + + if !modified { + return existing, false, nil + } + + if klog.V(4).Enabled() { + klog.Infof("VolumeSnapshotClass %q changes: %v", required.GetName(), JSONPatchNoError(existing, toUpdate)) + } + + newObj, err := client.Resource(volumeSnapshotClassResourceGVR).Update(ctx, toUpdate, metav1.UpdateOptions{}) + if err != nil { + recorder.Warningf("VolumeSnapshotClassFailed", "Failed to update VolumeSnapshotClass.snapshot.storage.k8s.io/v1: %v", err) + return nil, true, err + } + + recorder.Eventf("VolumeSnapshotClassUpdated", "Updated VolumeSnapshotClass.snapshot.storage.k8s.io/v1 because it changed") + return newObj, true, err +} + +func DeleteVolumeSnapshotClass(ctx context.Context, client dynamic.Interface, recorder events.Recorder, required *unstructured.Unstructured) (*unstructured.Unstructured, bool, error) { + namespace := required.GetNamespace() + err := client.Resource(volumeSnapshotClassResourceGVR).Namespace(namespace).Delete(ctx, required.GetName(), metav1.DeleteOptions{}) + if err != nil && errors.IsNotFound(err) { + return nil, false, nil + } + if err != nil { + return nil, false, err + } + reportDeleteEvent(recorder, required, err) + return nil, true, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcehelper/resource_helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcehelper/resource_helpers.go new file mode 100644 index 000000000..43ea9111c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcehelper/resource_helpers.go @@ -0,0 +1,76 @@ +package resourcehelper + +import ( + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + 
"k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes/scheme" + + "github.com/openshift/api" +) + +var ( + openshiftScheme = runtime.NewScheme() +) + +func init() { + if err := api.Install(openshiftScheme); err != nil { + panic(err) + } +} + +// FormatResourceForCLIWithNamespace generates a string that can be copy/pasted for use with oc get that includes +// specifying the namespace with the -n option (e.g., `ConfigMap/cluster-config-v1 -n kube-system`). +func FormatResourceForCLIWithNamespace(obj runtime.Object) string { + gvk := GuessObjectGroupVersionKind(obj) + kind := gvk.Kind + group := gvk.Group + var name, namespace string + accessor, err := meta.Accessor(obj) + if err != nil { + name = "" + namespace = "" + } else { + name = accessor.GetName() + namespace = accessor.GetNamespace() + } + if len(group) > 0 { + group = "." + group + } + if len(namespace) > 0 { + namespace = " -n " + namespace + } + return kind + group + "/" + name + namespace +} + +// FormatResourceForCLI generates a string that can be copy/pasted for use with oc get. +func FormatResourceForCLI(obj runtime.Object) string { + gvk := GuessObjectGroupVersionKind(obj) + kind := gvk.Kind + group := gvk.Group + var name string + accessor, err := meta.Accessor(obj) + if err != nil { + name = "" + } else { + name = accessor.GetName() + } + if len(group) > 0 { + group = "." + group + } + return kind + group + "/" + name +} + +// GuessObjectGroupVersionKind returns a human readable for the passed runtime object. +func GuessObjectGroupVersionKind(object runtime.Object) schema.GroupVersionKind { + if gvk := object.GetObjectKind().GroupVersionKind(); len(gvk.Kind) > 0 { + return gvk + } + if kinds, _, _ := scheme.Scheme.ObjectKinds(object); len(kinds) > 0 { + return kinds[0] + } + if kinds, _, _ := openshiftScheme.ObjectKinds(object); len(kinds) > 0 { + return kinds[0] + } + return schema.GroupVersionKind{Kind: ""} +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/admissionregistration.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/admissionregistration.go new file mode 100644 index 000000000..2fcfd1394 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/admissionregistration.go @@ -0,0 +1,51 @@ +package resourcemerge + +import ( + operatorsv1 "github.com/openshift/api/operator/v1" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// ExpectedMutatingWebhooksConfiguration returns last applied generation for MutatingWebhookConfiguration resource registered in operator +func ExpectedMutatingWebhooksConfiguration(name string, previousGenerations []operatorsv1.GenerationStatus) int64 { + generation := GenerationFor(previousGenerations, schema.GroupResource{Group: admissionregistrationv1.SchemeGroupVersion.Group, Resource: "mutatingwebhookconfigurations"}, "", name) + if generation != nil { + return generation.LastGeneration + } + return -1 +} + +// SetMutatingWebhooksConfigurationGeneration updates operator generation status list with last applied generation for provided MutatingWebhookConfiguration resource +func SetMutatingWebhooksConfigurationGeneration(generations *[]operatorsv1.GenerationStatus, actual *admissionregistrationv1.MutatingWebhookConfiguration) { + if actual == nil { + return + } + SetGeneration(generations, operatorsv1.GenerationStatus{ + Group: admissionregistrationv1.SchemeGroupVersion.Group, + Resource: 
"mutatingwebhookconfigurations", + Name: actual.Name, + LastGeneration: actual.ObjectMeta.Generation, + }) +} + +// ExpectedValidatingWebhooksConfiguration returns last applied generation for ValidatingWebhookConfiguration resource registered in operator +func ExpectedValidatingWebhooksConfiguration(name string, previousGenerations []operatorsv1.GenerationStatus) int64 { + generation := GenerationFor(previousGenerations, schema.GroupResource{Group: admissionregistrationv1.SchemeGroupVersion.Group, Resource: "validatingwebhookconfigurations"}, "", name) + if generation != nil { + return generation.LastGeneration + } + return -1 +} + +// SetValidatingWebhooksConfigurationGeneration updates operator generation status list with last applied generation for provided ValidatingWebhookConfiguration resource +func SetValidatingWebhooksConfigurationGeneration(generations *[]operatorsv1.GenerationStatus, actual *admissionregistrationv1.ValidatingWebhookConfiguration) { + if actual == nil { + return + } + SetGeneration(generations, operatorsv1.GenerationStatus{ + Group: admissionregistrationv1.SchemeGroupVersion.Group, + Resource: "validatingwebhookconfigurations", + Name: actual.Name, + LastGeneration: actual.ObjectMeta.Generation, + }) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apiextensions.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apiextensions.go new file mode 100644 index 000000000..754a5aabe --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apiextensions.go @@ -0,0 +1,68 @@ +package resourcemerge + +import ( + "strings" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/api/equality" + utilpointer "k8s.io/utils/pointer" +) + +// EnsureCustomResourceDefinitionV1Beta1 ensures that the existing matches the required. +// modified is set to true when existing had to be updated with required. +func EnsureCustomResourceDefinitionV1Beta1(modified *bool, existing *apiextensionsv1beta1.CustomResourceDefinition, required apiextensionsv1beta1.CustomResourceDefinition) { + EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) + + // we stomp everything + if !equality.Semantic.DeepEqual(existing.Spec, required.Spec) { + *modified = true + existing.Spec = required.Spec + } +} + +// EnsureCustomResourceDefinitionV1 ensures that the existing matches the required. +// modified is set to true when existing had to be updated with required. 
+func EnsureCustomResourceDefinitionV1(modified *bool, existing *apiextensionsv1.CustomResourceDefinition, required apiextensionsv1.CustomResourceDefinition) { + EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta) + + // we need to match defaults + mimicCRDV1Defaulting(&required) + // we stomp everything + if !equality.Semantic.DeepEqual(existing.Spec, required.Spec) { + *modified = true + existing.Spec = required.Spec + } +} + +func mimicCRDV1Defaulting(required *apiextensionsv1.CustomResourceDefinition) { + crd_SetDefaults_CustomResourceDefinitionSpec(&required.Spec) + + if required.Spec.Conversion != nil && + required.Spec.Conversion.Webhook != nil && + required.Spec.Conversion.Webhook.ClientConfig != nil && + required.Spec.Conversion.Webhook.ClientConfig.Service != nil { + crd_SetDefaults_ServiceReference(required.Spec.Conversion.Webhook.ClientConfig.Service) + } +} + +// lifted from https://github.com/kubernetes/kubernetes/blob/v1.21.0/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1/defaults.go#L42-L61 +func crd_SetDefaults_CustomResourceDefinitionSpec(obj *apiextensionsv1.CustomResourceDefinitionSpec) { + if len(obj.Names.Singular) == 0 { + obj.Names.Singular = strings.ToLower(obj.Names.Kind) + } + if len(obj.Names.ListKind) == 0 && len(obj.Names.Kind) > 0 { + obj.Names.ListKind = obj.Names.Kind + "List" + } + if obj.Conversion == nil { + obj.Conversion = &apiextensionsv1.CustomResourceConversion{ + Strategy: apiextensionsv1.NoneConverter, + } + } +} + +func crd_SetDefaults_ServiceReference(obj *apiextensionsv1.ServiceReference) { + if obj.Port == nil { + obj.Port = utilpointer.Int32Ptr(443) + } +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apps.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apps.go new file mode 100644 index 000000000..1731382e6 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/apps.go @@ -0,0 +1,80 @@ +package resourcemerge + +import ( + appsv1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + operatorsv1 "github.com/openshift/api/operator/v1" +) + +func GenerationFor(generations []operatorsv1.GenerationStatus, resource schema.GroupResource, namespace, name string) *operatorsv1.GenerationStatus { + for i := range generations { + curr := &generations[i] + if curr.Namespace == namespace && + curr.Name == name && + curr.Group == resource.Group && + curr.Resource == resource.Resource { + + return curr + } + } + + return nil +} + +func SetGeneration(generations *[]operatorsv1.GenerationStatus, newGeneration operatorsv1.GenerationStatus) { + if generations == nil { + generations = &[]operatorsv1.GenerationStatus{} + } + + existingGeneration := GenerationFor(*generations, schema.GroupResource{Group: newGeneration.Group, Resource: newGeneration.Resource}, newGeneration.Namespace, newGeneration.Name) + if existingGeneration == nil { + *generations = append(*generations, newGeneration) + return + } + + existingGeneration.LastGeneration = newGeneration.LastGeneration + existingGeneration.Hash = newGeneration.Hash +} + +func ExpectedDeploymentGeneration(required *appsv1.Deployment, previousGenerations []operatorsv1.GenerationStatus) int64 { + generation := GenerationFor(previousGenerations, schema.GroupResource{Group: "apps", Resource: "deployments"}, required.Namespace, required.Name) + if generation != nil { + return generation.LastGeneration + } + return -1 +} + +func 
SetDeploymentGeneration(generations *[]operatorsv1.GenerationStatus, actual *appsv1.Deployment) {
+	if actual == nil {
+		return
+	}
+	SetGeneration(generations, operatorsv1.GenerationStatus{
+		Group:          "apps",
+		Resource:       "deployments",
+		Namespace:      actual.Namespace,
+		Name:           actual.Name,
+		LastGeneration: actual.ObjectMeta.Generation,
+	})
+}
+
+func ExpectedDaemonSetGeneration(required *appsv1.DaemonSet, previousGenerations []operatorsv1.GenerationStatus) int64 {
+	generation := GenerationFor(previousGenerations, schema.GroupResource{Group: "apps", Resource: "daemonsets"}, required.Namespace, required.Name)
+	if generation != nil {
+		return generation.LastGeneration
+	}
+	return -1
+}
+
+func SetDaemonSetGeneration(generations *[]operatorsv1.GenerationStatus, actual *appsv1.DaemonSet) {
+	if actual == nil {
+		return
+	}
+	SetGeneration(generations, operatorsv1.GenerationStatus{
+		Group:          "apps",
+		Resource:       "daemonsets",
+		Namespace:      actual.Namespace,
+		Name:           actual.Name,
+		LastGeneration: actual.ObjectMeta.Generation,
+	})
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger.go
new file mode 100644
index 000000000..f1e6d0c9f
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/generic_config_merger.go
@@ -0,0 +1,271 @@
+package resourcemerge
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"strings"
+
+	"k8s.io/klog/v2"
+	"sigs.k8s.io/yaml"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	kyaml "k8s.io/apimachinery/pkg/util/yaml"
+)
+
+// MergeConfigMap takes a configmap, the target key, special overlay funcs, and a list of config YAMLs to overlay on top of each other.
+// It returns the resultant configmap and a bool indicating if any changes were made to the configmap
+func MergeConfigMap(configMap *corev1.ConfigMap, configKey string, specialCases map[string]MergeFunc, configYAMLs ...[]byte) (*corev1.ConfigMap, bool, error) {
+	return MergePrunedConfigMap(nil, configMap, configKey, specialCases, configYAMLs...)
+}
+
+// MergePrunedConfigMap takes a configmap, the target key, special overlay funcs, and a list of config YAMLs to overlay on top of each other.
+// It returns the resultant configmap and a bool indicating if any changes were made to the configmap.
+// It roundtrips the config through the given schema.
+func MergePrunedConfigMap(schema runtime.Object, configMap *corev1.ConfigMap, configKey string, specialCases map[string]MergeFunc, configYAMLs ...[]byte) (*corev1.ConfigMap, bool, error) {
+	configBytes, err := MergePrunedProcessConfig(schema, specialCases, configYAMLs...)
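+	// On success, configBytes holds the merged and (when schema is non-nil) pruned config.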
+	if err != nil {
+		return nil, false, err
+	}
+
+	if reflect.DeepEqual(configMap.Data[configKey], configBytes) {
+		return configMap, false, nil
+	}
+
+	ret := configMap.DeepCopy()
+	ret.Data[configKey] = string(configBytes)
+
+	return ret, true, nil
+}
+
+// MergeProcessConfig merges a series of config yaml files together with each later one overlaying all previous
+func MergeProcessConfig(specialCases map[string]MergeFunc, configYAMLs ...[]byte) ([]byte, error) {
+	currentConfigYAML := configYAMLs[0]
+
+	for _, currConfigYAML := range configYAMLs[1:] {
+		prevConfigJSON, err := kyaml.ToJSON(currentConfigYAML)
+		if err != nil {
+			klog.Warning(err)
+			// maybe it's just json
+			prevConfigJSON = currentConfigYAML
+		}
+		prevConfig := map[string]interface{}{}
+		if err := json.NewDecoder(bytes.NewBuffer(prevConfigJSON)).Decode(&prevConfig); err != nil {
+			return nil, err
+		}
+
+		if len(currConfigYAML) > 0 {
+			currConfigJSON, err := kyaml.ToJSON(currConfigYAML)
+			if err != nil {
+				klog.Warning(err)
+				// maybe it's just json
+				currConfigJSON = currConfigYAML
+			}
+			currConfig := map[string]interface{}{}
+			if err := json.NewDecoder(bytes.NewBuffer(currConfigJSON)).Decode(&currConfig); err != nil {
+				return nil, err
+			}
+
+			// protect against mismatched typemeta
+			prevAPIVersion, _, _ := unstructured.NestedString(prevConfig, "apiVersion")
+			prevKind, _, _ := unstructured.NestedString(prevConfig, "kind")
+			currAPIVersion, _, _ := unstructured.NestedString(currConfig, "apiVersion")
+			currKind, _, _ := unstructured.NestedString(currConfig, "kind")
+			currGVKSet := len(currAPIVersion) > 0 || len(currKind) > 0
+			gvkMismatched := currAPIVersion != prevAPIVersion || currKind != prevKind
+			if currGVKSet && gvkMismatched {
+				return nil, fmt.Errorf("%v/%v does not equal %v/%v", currAPIVersion, currKind, prevAPIVersion, prevKind)
+			}
+
+			if err := mergeConfig(prevConfig, currConfig, "", specialCases); err != nil {
+				return nil, err
+			}
+		}
+
+		currentConfigYAML, err = runtime.Encode(unstructured.UnstructuredJSONScheme, &unstructured.Unstructured{Object: prevConfig})
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return currentConfigYAML, nil
+}
+
+// MergePrunedProcessConfig merges a series of config yaml files together with each later one overlaying all previous.
+// The result is roundtripped through the given schema if it is non-nil.
+func MergePrunedProcessConfig(schema runtime.Object, specialCases map[string]MergeFunc, configYAMLs ...[]byte) ([]byte, error) {
+	bs, err := MergeProcessConfig(specialCases, configYAMLs...)
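+	// On success, bs holds the raw merged config before any schema round-trip.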
+	if err != nil {
+		return nil, err
+	}
+
+	if schema == nil {
+		return bs, nil
+	}
+
+	// roundtrip through the schema
+	typed := schema.DeepCopyObject()
+	if err := yaml.Unmarshal(bs, typed); err != nil {
+		return nil, err
+	}
+	typedBytes, err := json.Marshal(typed)
+	if err != nil {
+		return nil, err
+	}
+	var untypedJSON map[string]interface{}
+	if err := json.Unmarshal(typedBytes, &untypedJSON); err != nil {
+		return nil, err
+	}
+
+	// and intersect output with input because we cannot rely on omitempty in the schema
+	inputBytes, err := yaml.YAMLToJSON(bs)
+	if err != nil {
+		return nil, err
+	}
+	var inputJSON map[string]interface{}
+	if err := json.Unmarshal(inputBytes, &inputJSON); err != nil {
+		return nil, err
+	}
+	return json.Marshal(intersectJSON(inputJSON, untypedJSON))
+}
+
+type MergeFunc func(dst, src interface{}, currentPath string) (interface{}, error)
+
+var _ MergeFunc = RemoveConfig
+
+// RemoveConfig is a merge func that eliminates an entire path from the config
+func RemoveConfig(dst, src interface{}, currentPath string) (interface{}, error) {
+	return dst, nil
+}
+
+// mergeConfig overwrites entries in curr by additional. It modifies curr.
+func mergeConfig(curr, additional map[string]interface{}, currentPath string, specialCases map[string]MergeFunc) error {
+	for additionalKey, additionalVal := range additional {
+		fullKey := currentPath + "." + additionalKey
+		specialCase, ok := specialCases[fullKey]
+		if ok {
+			var err error
+			curr[additionalKey], err = specialCase(curr[additionalKey], additionalVal, currentPath)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+
+		currVal, ok := curr[additionalKey]
+		if !ok {
+			curr[additionalKey] = additionalVal
+			continue
+		}
+
+		// only some scalars are accepted
+		switch castVal := additionalVal.(type) {
+		case map[string]interface{}:
+			currValAsMap, ok := currVal.(map[string]interface{})
+			if !ok {
+				currValAsMap = map[string]interface{}{}
+				curr[additionalKey] = currValAsMap
+			}
+
+			err := mergeConfig(currValAsMap, castVal, fullKey, specialCases)
+			if err != nil {
+				return err
+			}
+			continue
+
+		default:
+			if err := unstructured.SetNestedField(curr, castVal, additionalKey); err != nil {
+				return err
+			}
+		}
+
+	}
+
+	return nil
+}
+
+// intersectJSON returns the intersection of both JSON objects,
+// preferring the values of the first argument.
+func intersectJSON(x1, x2 map[string]interface{}) map[string]interface{} {
+	if x1 == nil || x2 == nil {
+		return nil
+	}
+	ret := map[string]interface{}{}
+	for k, v1 := range x1 {
+		v2, ok := x2[k]
+		if !ok {
+			continue
+		}
+		ret[k] = intersectValue(v1, v2)
+	}
+	return ret
+}
+
+func intersectArray(x1, x2 []interface{}) []interface{} {
+	if x1 == nil || x2 == nil {
+		return nil
+	}
+	ret := make([]interface{}, 0, len(x1))
+	for i := range x1 {
+		if i >= len(x2) {
+			break
+		}
+		ret = append(ret, intersectValue(x1[i], x2[i]))
+	}
+	return ret
+}
+
+func intersectValue(x1, x2 interface{}) interface{} {
+	switch x1 := x1.(type) {
+	case map[string]interface{}:
+		x2, ok := x2.(map[string]interface{})
+		if !ok {
+			return x1
+		}
+		return intersectJSON(x1, x2)
+	case []interface{}:
+		x2, ok := x2.([]interface{})
+		if !ok {
+			return x1
+		}
+		return intersectArray(x1, x2)
+	default:
+		return x1
+	}
+}
+
+// IsRequiredConfigPresent can check an observedConfig to see if certain required paths are present in that config.
+// This allows operators to require certain configuration to be observed before proceeding to honor a configuration or roll it out.
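+//
+// For example (the path is illustrative only):
+//
+//	err := IsRequiredConfigPresent(observedConfig, [][]string{
+//		{"servingInfo", "certFile"},
+//	})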
+func IsRequiredConfigPresent(config []byte, requiredPaths [][]string) error { + if len(config) == 0 { + return fmt.Errorf("no observedConfig") + } + + existingConfig := map[string]interface{}{} + if err := json.NewDecoder(bytes.NewBuffer(config)).Decode(&existingConfig); err != nil { + return fmt.Errorf("error parsing config, %v", err) + } + + for _, requiredPath := range requiredPaths { + configVal, found, err := unstructured.NestedFieldNoCopy(existingConfig, requiredPath...) + if err != nil { + return fmt.Errorf("error reading %v from config, %v", strings.Join(requiredPath, "."), err) + } + if !found { + return fmt.Errorf("%v missing from config", strings.Join(requiredPath, ".")) + } + if configVal == nil { + return fmt.Errorf("%v null in config", strings.Join(requiredPath, ".")) + } + if configValSlice, ok := configVal.([]interface{}); ok && len(configValSlice) == 0 { + return fmt.Errorf("%v empty in config", strings.Join(requiredPath, ".")) + } + if configValString, ok := configVal.(string); ok && len(configValString) == 0 { + return fmt.Errorf("%v empty in config", strings.Join(requiredPath, ".")) + } + } + return nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/object_merger.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/object_merger.go new file mode 100644 index 000000000..4881c4b8a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourcemerge/object_merger.go @@ -0,0 +1,277 @@ +package resourcemerge + +import ( + "reflect" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// EnsureObjectMeta writes namespace, name, labels, and annotations. Don't set other things here. +// TODO finalizer support maybe? 
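+//
+// Typical call pattern (a sketch; existing and required are assumed objects
+// carrying ObjectMeta):
+//
+//	modified := BoolPtr(false)
+//	EnsureObjectMeta(modified, &existing.ObjectMeta, required.ObjectMeta)
+//	if *modified {
+//		// the caller persists existing with an Update call
+//	}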
+func EnsureObjectMeta(modified *bool, existing *metav1.ObjectMeta, required metav1.ObjectMeta) { + SetStringIfSet(modified, &existing.Namespace, required.Namespace) + SetStringIfSet(modified, &existing.Name, required.Name) + MergeMap(modified, &existing.Labels, required.Labels) + MergeMap(modified, &existing.Annotations, required.Annotations) + MergeOwnerRefs(modified, &existing.OwnerReferences, required.OwnerReferences) +} + +// WithCleanLabelsAndAnnotations cleans the metadata off the removal annotations/labels/ownerrefs +// (those that end with trailing "-") +func WithCleanLabelsAndAnnotations(obj metav1.Object) metav1.Object { + obj.SetAnnotations(cleanRemovalKeys(obj.GetAnnotations())) + obj.SetLabels(cleanRemovalKeys(obj.GetLabels())) + obj.SetOwnerReferences(cleanRemovalOwnerRefs(obj.GetOwnerReferences())) + return obj +} + +func cleanRemovalKeys(required map[string]string) map[string]string { + for k := range required { + if strings.HasSuffix(k, "-") { + delete(required, k) + } + } + return required +} + +func stringPtr(val string) *string { + return &val +} + +func SetString(modified *bool, existing *string, required string) { + if required != *existing { + *existing = required + *modified = true + } +} + +func SetStringIfSet(modified *bool, existing *string, required string) { + if len(required) == 0 { + return + } + if required != *existing { + *existing = required + *modified = true + } +} + +func setStringPtr(modified *bool, existing **string, required *string) { + if *existing == nil || (required == nil && *existing != nil) { + *modified = true + *existing = required + return + } + SetString(modified, *existing, *required) +} + +func SetStringSlice(modified *bool, existing *[]string, required []string) { + if !reflect.DeepEqual(required, *existing) { + *existing = required + *modified = true + } +} + +func SetStringSliceIfSet(modified *bool, existing *[]string, required []string) { + if required == nil { + return + } + if !reflect.DeepEqual(required, *existing) { + *existing = required + *modified = true + } +} + +func BoolPtr(val bool) *bool { + return &val +} + +func SetBool(modified *bool, existing *bool, required bool) { + if required != *existing { + *existing = required + *modified = true + } +} + +func setBoolPtr(modified *bool, existing **bool, required *bool) { + if *existing == nil || (required == nil && *existing != nil) { + *modified = true + *existing = required + return + } + SetBool(modified, *existing, *required) +} + +func int64Ptr(val int64) *int64 { + return &val +} + +func SetInt32(modified *bool, existing *int32, required int32) { + if required != *existing { + *existing = required + *modified = true + } +} + +func SetInt32IfSet(modified *bool, existing *int32, required int32) { + if required == 0 { + return + } + + SetInt32(modified, existing, required) +} + +func SetInt64(modified *bool, existing *int64, required int64) { + if required != *existing { + *existing = required + *modified = true + } +} + +func setInt64Ptr(modified *bool, existing **int64, required *int64) { + if *existing == nil || (required == nil && *existing != nil) { + *modified = true + *existing = required + return + } + SetInt64(modified, *existing, *required) +} + +func MergeMap(modified *bool, existing *map[string]string, required map[string]string) { + if *existing == nil { + *existing = map[string]string{} + } + for k, v := range required { + actualKey := k + removeKey := false + + // if "required" map contains a key with "-" as suffix, remove that + // key from the existing map 
instead of replacing the value + if strings.HasSuffix(k, "-") { + removeKey = true + actualKey = strings.TrimRight(k, "-") + } + + if existingV, ok := (*existing)[actualKey]; removeKey { + if !ok { + continue + } + // value found -> it should be removed + delete(*existing, actualKey) + *modified = true + + } else if !ok || v != existingV { + *modified = true + (*existing)[actualKey] = v + } + } +} + +func SetMapStringString(modified *bool, existing *map[string]string, required map[string]string) { + if *existing == nil { + *existing = map[string]string{} + } + + if !reflect.DeepEqual(*existing, required) { + *existing = required + } +} + +func SetMapStringStringIfSet(modified *bool, existing *map[string]string, required map[string]string) { + if required == nil { + return + } + if *existing == nil { + *existing = map[string]string{} + } + + if !reflect.DeepEqual(*existing, required) { + *existing = required + } +} + +func MergeOwnerRefs(modified *bool, existing *[]metav1.OwnerReference, required []metav1.OwnerReference) { + if *existing == nil { + *existing = []metav1.OwnerReference{} + } + + for _, o := range required { + removeOwner := false + + // if "required" ownerRefs contain an owner.UID with "-" as suffix, remove that + // ownerRef from the existing ownerRefs instead of replacing the value + // NOTE: this is the same format as kubectl annotate and kubectl label + if strings.HasSuffix(string(o.UID), "-") { + removeOwner = true + } + + existedIndex := 0 + + for existedIndex < len(*existing) { + if ownerRefMatched(o, (*existing)[existedIndex]) { + break + } + existedIndex++ + } + + if existedIndex == len(*existing) { + // There is no matched ownerref found, append the ownerref + // if it is not to be removed. + if !removeOwner { + *existing = append(*existing, o) + *modified = true + } + continue + } + + if removeOwner { + *existing = append((*existing)[:existedIndex], (*existing)[existedIndex+1:]...) + *modified = true + continue + } + + if !reflect.DeepEqual(o, (*existing)[existedIndex]) { + (*existing)[existedIndex] = o + *modified = true + } + } +} + +func ownerRefMatched(existing, required metav1.OwnerReference) bool { + if existing.Name != required.Name { + return false + } + + if existing.Kind != required.Kind { + return false + } + + existingGV, err := schema.ParseGroupVersion(existing.APIVersion) + + if err != nil { + return false + } + + requiredGV, err := schema.ParseGroupVersion(required.APIVersion) + + if err != nil { + return false + } + + if existingGV.Group != requiredGV.Group { + return false + } + + return true +} + +func cleanRemovalOwnerRefs(required []metav1.OwnerReference) []metav1.OwnerReference { + for k := 0; k < len(required); k++ { + if strings.HasSuffix(string(required[k].UID), "-") { + required = append(required[:k], required[k+1:]...) 
+ k-- + } + } + return required +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/admission.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/admission.go new file mode 100644 index 000000000..7c69478ea --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/admission.go @@ -0,0 +1,35 @@ +package resourceread + +import ( + admissionv1 "k8s.io/api/admissionregistration/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var ( + admissionScheme = runtime.NewScheme() + admissionCodecs = serializer.NewCodecFactory(admissionScheme) +) + +func init() { + utilruntime.Must(admissionv1.AddToScheme(admissionScheme)) +} + +func ReadValidatingWebhookConfigurationV1OrDie(objBytes []byte) *admissionv1.ValidatingWebhookConfiguration { + requiredObj, err := runtime.Decode(admissionCodecs.UniversalDecoder(admissionv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + + return requiredObj.(*admissionv1.ValidatingWebhookConfiguration) +} + +func ReadMutatingWebhookConfigurationV1OrDie(objBytes []byte) *admissionv1.MutatingWebhookConfiguration { + requiredObj, err := runtime.Decode(admissionCodecs.UniversalDecoder(admissionv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + + return requiredObj.(*admissionv1.MutatingWebhookConfiguration) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apiextensions.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apiextensions.go new file mode 100644 index 000000000..e21f774e1 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apiextensions.go @@ -0,0 +1,35 @@ +package resourceread + +import ( + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var ( + apiExtensionsScheme = runtime.NewScheme() + apiExtensionsCodecs = serializer.NewCodecFactory(apiExtensionsScheme) +) + +func init() { + utilruntime.Must(apiextensionsv1beta1.AddToScheme(apiExtensionsScheme)) + utilruntime.Must(apiextensionsv1.AddToScheme(apiExtensionsScheme)) +} + +func ReadCustomResourceDefinitionV1Beta1OrDie(objBytes []byte) *apiextensionsv1beta1.CustomResourceDefinition { + requiredObj, err := runtime.Decode(apiExtensionsCodecs.UniversalDecoder(apiextensionsv1beta1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*apiextensionsv1beta1.CustomResourceDefinition) +} + +func ReadCustomResourceDefinitionV1OrDie(objBytes []byte) *apiextensionsv1.CustomResourceDefinition { + requiredObj, err := runtime.Decode(apiExtensionsCodecs.UniversalDecoder(apiextensionsv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*apiextensionsv1.CustomResourceDefinition) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apps.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apps.go new file mode 100644 index 000000000..8490017e1 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/apps.go @@ -0,0 +1,34 @@ +package resourceread + +import ( + appsv1 
"k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var ( + appsScheme = runtime.NewScheme() + appsCodecs = serializer.NewCodecFactory(appsScheme) +) + +func init() { + if err := appsv1.AddToScheme(appsScheme); err != nil { + panic(err) + } +} + +func ReadDeploymentV1OrDie(objBytes []byte) *appsv1.Deployment { + requiredObj, err := runtime.Decode(appsCodecs.UniversalDecoder(appsv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*appsv1.Deployment) +} + +func ReadDaemonSetV1OrDie(objBytes []byte) *appsv1.DaemonSet { + requiredObj, err := runtime.Decode(appsCodecs.UniversalDecoder(appsv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*appsv1.DaemonSet) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/core.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/core.go new file mode 100644 index 000000000..daa27c7b5 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/core.go @@ -0,0 +1,78 @@ +package resourceread + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var ( + coreScheme = runtime.NewScheme() + coreCodecs = serializer.NewCodecFactory(coreScheme) +) + +func init() { + if err := corev1.AddToScheme(coreScheme); err != nil { + panic(err) + } +} + +func ReadConfigMapV1OrDie(objBytes []byte) *corev1.ConfigMap { + requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*corev1.ConfigMap) +} + +func ReadSecretV1OrDie(objBytes []byte) *corev1.Secret { + requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*corev1.Secret) +} + +func ReadNamespaceV1OrDie(objBytes []byte) *corev1.Namespace { + requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*corev1.Namespace) +} + +func ReadServiceAccountV1OrDie(objBytes []byte) *corev1.ServiceAccount { + requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*corev1.ServiceAccount) +} + +func ReadServiceV1OrDie(objBytes []byte) *corev1.Service { + requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*corev1.Service) +} + +func ReadPodV1OrDie(objBytes []byte) *corev1.Pod { + requiredObj, err := ReadPodV1(objBytes) + if err != nil { + panic(err) + } + return requiredObj +} + +func ReadPodV1(objBytes []byte) (*corev1.Pod, error) { + requiredObj, err := runtime.Decode(coreCodecs.UniversalDecoder(corev1.SchemeGroupVersion), objBytes) + if err != nil { + return nil, err + } + return requiredObj.(*corev1.Pod), nil +} + +func WritePodV1OrDie(obj *corev1.Pod) string { + return runtime.EncodeOrDie(coreCodecs.LegacyCodec(corev1.SchemeGroupVersion), obj) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/generic.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/generic.go new file mode 100644 index 000000000..b62fb2b64 --- /dev/null +++ 
b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/generic.go @@ -0,0 +1,57 @@ +package resourceread + +import ( + "github.com/openshift/api" + admissionregistrationv1 "k8s.io/api/admissionregistration/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + apiextensionsv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/kubernetes/scheme" + migrationv1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1" +) + +var ( + genericScheme = runtime.NewScheme() + genericCodecs = serializer.NewCodecFactory(genericScheme) + genericCodec = genericCodecs.UniversalDeserializer() +) + +func init() { + utilruntime.Must(api.Install(genericScheme)) + utilruntime.Must(api.InstallKube(genericScheme)) + utilruntime.Must(apiextensionsv1beta1.AddToScheme(genericScheme)) + utilruntime.Must(apiextensionsv1.AddToScheme(genericScheme)) + utilruntime.Must(migrationv1alpha1.AddToScheme(genericScheme)) + utilruntime.Must(admissionregistrationv1.AddToScheme(genericScheme)) +} + +// ReadGenericWithUnstructured parses given yaml file using known scheme (see genericScheme above). +// If the object kind is not registered in the scheme, it returns Unstructured as the last resort. +func ReadGenericWithUnstructured(objBytes []byte) (runtime.Object, error) { + // Try to get a typed object first + typedObj, _, decodeErr := genericCodec.Decode(objBytes, nil, nil) + if decodeErr == nil { + return typedObj, nil + } + + // Try unstructured, hoping to recover from "no kind XXX is registered for version YYY" + unstructuredObj, _, err := scheme.Codecs.UniversalDecoder().Decode(objBytes, nil, &unstructured.Unstructured{}) + if err != nil { + // Return the original error + return nil, decodeErr + } + return unstructuredObj, nil +} + +// ReadGenericWithUnstructuredOrDie parses given yaml file using known scheme (see genericScheme above). +// If the object kind is not registered in the scheme, it returns Unstructured as the last resort. 
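+//
+// A minimal usage sketch (manifestBytes is a hypothetical YAML manifest):
+//
+//	obj := ReadGenericWithUnstructuredOrDie(manifestBytes)
+//	if u, ok := obj.(*unstructured.Unstructured); ok {
+//		// the kind was not registered in genericScheme; fall back to unstructured handling
+//		_ = u.GetName()
+//	}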
+func ReadGenericWithUnstructuredOrDie(objBytes []byte) runtime.Object { + obj, err := ReadGenericWithUnstructured(objBytes) + if err != nil { + panic(err) + } + return obj +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/images.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/images.go new file mode 100644 index 000000000..62a80d128 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/images.go @@ -0,0 +1,26 @@ +package resourceread + +import ( + imagev1 "github.com/openshift/api/image/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var ( + imagesScheme = runtime.NewScheme() + imagesCodecs = serializer.NewCodecFactory(imagesScheme) +) + +func init() { + if err := imagev1.AddToScheme(imagesScheme); err != nil { + panic(err) + } +} + +func ReadImageStreamV1OrDie(objBytes []byte) *imagev1.ImageStream { + requiredObj, err := runtime.Decode(imagesCodecs.UniversalDecoder(imagev1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*imagev1.ImageStream) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/migration.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/migration.go new file mode 100644 index 000000000..71b6074c9 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/migration.go @@ -0,0 +1,26 @@ +package resourceread + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + migrationv1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1" +) + +var ( + migrationScheme = runtime.NewScheme() + migrationCodecs = serializer.NewCodecFactory(migrationScheme) +) + +func init() { + if err := migrationv1alpha1.AddToScheme(migrationScheme); err != nil { + panic(err) + } +} + +func ReadStorageVersionMigrationV1Alpha1OrDie(objBytes []byte) *migrationv1alpha1.StorageVersionMigration { + requiredObj, err := runtime.Decode(migrationCodecs.UniversalDecoder(migrationv1alpha1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*migrationv1alpha1.StorageVersionMigration) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/policy.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/policy.go new file mode 100644 index 000000000..fe058fdc6 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/policy.go @@ -0,0 +1,25 @@ +package resourceread + +import ( + policyv1 "k8s.io/api/policy/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var ( + policyScheme = runtime.NewScheme() + policyCodecs = serializer.NewCodecFactory(policyScheme) +) + +func init() { + utilruntime.Must(policyv1.AddToScheme(policyScheme)) +} + +func ReadPodDisruptionBudgetV1OrDie(objBytes []byte) *policyv1.PodDisruptionBudget { + requiredObj, err := runtime.Decode(policyCodecs.UniversalDecoder(policyv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*policyv1.PodDisruptionBudget) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/rbac.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/rbac.go new file mode 100644 index 000000000..bf14899d8 --- 
/dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/rbac.go @@ -0,0 +1,50 @@ +package resourceread + +import ( + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var ( + rbacScheme = runtime.NewScheme() + rbacCodecs = serializer.NewCodecFactory(rbacScheme) +) + +func init() { + if err := rbacv1.AddToScheme(rbacScheme); err != nil { + panic(err) + } +} + +func ReadClusterRoleBindingV1OrDie(objBytes []byte) *rbacv1.ClusterRoleBinding { + requiredObj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*rbacv1.ClusterRoleBinding) +} + +func ReadClusterRoleV1OrDie(objBytes []byte) *rbacv1.ClusterRole { + requiredObj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*rbacv1.ClusterRole) +} + +func ReadRoleBindingV1OrDie(objBytes []byte) *rbacv1.RoleBinding { + requiredObj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*rbacv1.RoleBinding) +} + +func ReadRoleV1OrDie(objBytes []byte) *rbacv1.Role { + requiredObj, err := runtime.Decode(rbacCodecs.UniversalDecoder(rbacv1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*rbacv1.Role) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/route.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/route.go new file mode 100644 index 000000000..08e125892 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/route.go @@ -0,0 +1,26 @@ +package resourceread + +import ( + routev1 "github.com/openshift/api/route/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +var ( + routeScheme = runtime.NewScheme() + routeCodecs = serializer.NewCodecFactory(routeScheme) +) + +func init() { + if err := routev1.AddToScheme(routeScheme); err != nil { + panic(err) + } +} + +func ReadRouteV1OrDie(objBytes []byte) *routev1.Route { + requiredObj, err := runtime.Decode(routeCodecs.UniversalDecoder(routev1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*routev1.Route) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/storage.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/storage.go new file mode 100644 index 000000000..6a7d51ee7 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/storage.go @@ -0,0 +1,43 @@ +package resourceread + +import ( + storagev1 "k8s.io/api/storage/v1" + storagev1beta1 "k8s.io/api/storage/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var ( + storageScheme = runtime.NewScheme() + storageCodecs = serializer.NewCodecFactory(storageScheme) +) + +func init() { + utilruntime.Must(storagev1.AddToScheme(storageScheme)) + utilruntime.Must(storagev1beta1.AddToScheme(storageScheme)) +} + +func ReadStorageClassV1OrDie(objBytes []byte) *storagev1.StorageClass { + requiredObj, err := runtime.Decode(storageCodecs.UniversalDecoder(storagev1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return 
requiredObj.(*storagev1.StorageClass) +} + +func ReadCSIDriverV1Beta1OrDie(objBytes []byte) *storagev1beta1.CSIDriver { + requiredObj, err := runtime.Decode(storageCodecs.UniversalDecoder(storagev1beta1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*storagev1beta1.CSIDriver) +} + +func ReadCSIDriverV1OrDie(objBytes []byte) *storagev1.CSIDriver { + requiredObj, err := runtime.Decode(storageCodecs.UniversalDecoder(storagev1.SchemeGroupVersion), objBytes) + if err != nil { + panic(err) + } + return requiredObj.(*storagev1.CSIDriver) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/unstructured.go b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/unstructured.go new file mode 100644 index 000000000..bf6bfb010 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resource/resourceread/unstructured.go @@ -0,0 +1,18 @@ +package resourceread + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/client-go/kubernetes/scheme" +) + +func ReadCredentialRequestsOrDie(objBytes []byte) *unstructured.Unstructured { + return ReadUnstructuredOrDie(objBytes) +} + +func ReadUnstructuredOrDie(objBytes []byte) *unstructured.Unstructured { + udi, _, err := scheme.Codecs.UniversalDecoder().Decode(objBytes, nil, &unstructured.Unstructured{}) + if err != nil { + panic(err) + } + return udi.(*unstructured.Unstructured) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/core.go b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/core.go new file mode 100644 index 000000000..f5a26338b --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/core.go @@ -0,0 +1,67 @@ +package resourcesynccontroller + +import ( + "crypto/x509" + "fmt" + "reflect" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/util/cert" + + "github.com/openshift/library-go/pkg/crypto" +) + +func CombineCABundleConfigMaps(destinationConfigMap ResourceLocation, lister corev1listers.ConfigMapLister, inputConfigMaps ...ResourceLocation) (*corev1.ConfigMap, error) { + certificates := []*x509.Certificate{} + for _, input := range inputConfigMaps { + inputConfigMap, err := lister.ConfigMaps(input.Namespace).Get(input.Name) + if apierrors.IsNotFound(err) { + continue + } + if err != nil { + return nil, err + } + + // configmaps must conform to this + inputContent := inputConfigMap.Data["ca-bundle.crt"] + if len(inputContent) == 0 { + continue + } + inputCerts, err := cert.ParseCertsPEM([]byte(inputContent)) + if err != nil { + return nil, fmt.Errorf("configmap/%s in %q is malformed: %v", input.Name, input.Namespace, err) + } + certificates = append(certificates, inputCerts...) + } + + certificates = crypto.FilterExpiredCerts(certificates...) + finalCertificates := []*x509.Certificate{} + // now check for duplicates. n^2, but super simple + for i := range certificates { + found := false + for j := range finalCertificates { + if reflect.DeepEqual(certificates[i].Raw, finalCertificates[j].Raw) { + found = true + break + } + } + if !found { + finalCertificates = append(finalCertificates, certificates[i]) + } + } + + caBytes, err := crypto.EncodeCertificates(finalCertificates...) 
+	if err != nil {
+		return nil, err
+	}
+
+	return &corev1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{Namespace: destinationConfigMap.Namespace, Name: destinationConfigMap.Name},
+		Data: map[string]string{
+			"ca-bundle.crt": string(caBytes),
+		},
+	}, nil
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/interfaces.go b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/interfaces.go
new file mode 100644
index 000000000..c53af8bdf
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/interfaces.go
@@ -0,0 +1,41 @@
+package resourcesynccontroller
+
+import "k8s.io/apimachinery/pkg/util/sets"
+
+// ResourceLocation describes coordinates for a resource to be synced
+type ResourceLocation struct {
+	Namespace string `json:"namespace"`
+	Name      string `json:"name"`
+
+	// Provider, if set for the source location, enhances the error message to point to the
+	// component which provides this resource.
+	Provider string `json:"provider,omitempty"`
+}
+
+// preconditionsFulfilled is a function that indicates whether all prerequisites
+// are met and a resource can be synced.
+type preconditionsFulfilled func() (bool, error)
+
+func alwaysFulfilledPreconditions() (bool, error) { return true, nil }
+
+type syncRuleSource struct {
+	ResourceLocation
+	syncedKeys               sets.String            // defines the set of keys to sync from source to dest
+	preconditionsFulfilledFn preconditionsFulfilled // preconditions to fulfill before syncing the resource
+}
+
+type syncRules map[ResourceLocation]syncRuleSource
+
+var (
+	emptyResourceLocation = ResourceLocation{}
+)
+
+// ResourceSyncer allows changes to syncing rules by this controller
+type ResourceSyncer interface {
+	// SyncConfigMap indicates that a configmap should be copied from the source to the destination. It will also
+	// mirror a deletion from the source. If the source is a zero object the destination will be deleted.
+	SyncConfigMap(destination, source ResourceLocation) error
+	// SyncSecret indicates that a secret should be copied from the source to the destination. It will also
+	// mirror a deletion from the source. If the source is a zero object the destination will be deleted.
+ SyncSecret(destination, source ResourceLocation) error +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go new file mode 100644 index 000000000..02cdedb17 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/resourcesynccontroller/resourcesync_controller.go @@ -0,0 +1,340 @@ +package resourcesynccontroller + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "sort" + "strings" + "sync" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + + operatorv1 "github.com/openshift/api/operator/v1" + + "github.com/openshift/library-go/pkg/controller/factory" + "github.com/openshift/library-go/pkg/operator/condition" + "github.com/openshift/library-go/pkg/operator/events" + "github.com/openshift/library-go/pkg/operator/management" + "github.com/openshift/library-go/pkg/operator/resource/resourceapply" + "github.com/openshift/library-go/pkg/operator/v1helpers" +) + +// ResourceSyncController is a controller that will copy source configmaps and secrets to their destinations. +// It will also mirror deletions by deleting destinations. +type ResourceSyncController struct { + name string + // syncRuleLock is used to ensure we avoid races on changes to syncing rules + syncRuleLock sync.RWMutex + // configMapSyncRules is a map from destination location to source location + configMapSyncRules syncRules + // secretSyncRules is a map from destination location to source location + secretSyncRules syncRules + + // knownNamespaces is the list of namespaces we are watching. + knownNamespaces sets.String + + configMapGetter corev1client.ConfigMapsGetter + secretGetter corev1client.SecretsGetter + kubeInformersForNamespaces v1helpers.KubeInformersForNamespaces + operatorConfigClient v1helpers.OperatorClient + + runFn func(ctx context.Context, workers int) + syncCtx factory.SyncContext +} + +var _ ResourceSyncer = &ResourceSyncController{} +var _ factory.Controller = &ResourceSyncController{} + +// NewResourceSyncController creates ResourceSyncController. 
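+//
+// A wiring sketch (illustrative, not part of this patch): both the source and the
+// destination namespaces must be covered by the informers, and the informers must be started.
+//
+//	informers := v1helpers.NewKubeInformersForNamespaces(kubeClient, "", "openshift-config", "openshift-config-managed")
+//	c := NewResourceSyncController(operatorClient, informers, kubeClient.CoreV1(), kubeClient.CoreV1(), recorder)
+//	_ = c.SyncConfigMap(
+//		ResourceLocation{Namespace: "openshift-config-managed", Name: "trusted-ca-bundle"},
+//		ResourceLocation{Namespace: "openshift-config", Name: "trusted-ca-bundle"},
+//	)
+//	informers.Start(ctx.Done())
+//	go c.Run(ctx, 1)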
+func NewResourceSyncController( + operatorConfigClient v1helpers.OperatorClient, + kubeInformersForNamespaces v1helpers.KubeInformersForNamespaces, + secretsGetter corev1client.SecretsGetter, + configMapsGetter corev1client.ConfigMapsGetter, + eventRecorder events.Recorder, +) *ResourceSyncController { + c := &ResourceSyncController{ + name: "ResourceSyncController", + operatorConfigClient: operatorConfigClient, + + configMapSyncRules: syncRules{}, + secretSyncRules: syncRules{}, + kubeInformersForNamespaces: kubeInformersForNamespaces, + knownNamespaces: kubeInformersForNamespaces.Namespaces(), + + configMapGetter: v1helpers.CachedConfigMapGetter(configMapsGetter, kubeInformersForNamespaces), + secretGetter: v1helpers.CachedSecretGetter(secretsGetter, kubeInformersForNamespaces), + syncCtx: factory.NewSyncContext("ResourceSyncController", eventRecorder.WithComponentSuffix("resource-sync-controller")), + } + + informers := []factory.Informer{ + operatorConfigClient.Informer(), + } + for namespace := range kubeInformersForNamespaces.Namespaces() { + if len(namespace) == 0 { + continue + } + informer := kubeInformersForNamespaces.InformersFor(namespace) + informers = append(informers, informer.Core().V1().ConfigMaps().Informer()) + informers = append(informers, informer.Core().V1().Secrets().Informer()) + } + + f := factory.New().WithSync(c.Sync).WithSyncContext(c.syncCtx).WithInformers(informers...).ResyncEvery(time.Minute).ToController(c.name, eventRecorder.WithComponentSuffix("resource-sync-controller")) + c.runFn = f.Run + + return c +} + +func (c *ResourceSyncController) Run(ctx context.Context, workers int) { + c.runFn(ctx, workers) +} + +func (c *ResourceSyncController) Name() string { + return c.name +} + +func (c *ResourceSyncController) SyncConfigMap(destination, source ResourceLocation) error { + return c.syncConfigMap(destination, source, alwaysFulfilledPreconditions) +} + +func (c *ResourceSyncController) SyncPartialConfigMap(destination ResourceLocation, source ResourceLocation, keys ...string) error { + return c.syncConfigMap(destination, source, alwaysFulfilledPreconditions, keys...) +} + +// SyncConfigMapConditionally adds a new configmap that the resource sync +// controller will synchronise if the given precondition is fulfilled. 
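+//
+// For example (the precondition func shown is hypothetical):
+//
+//	err := c.SyncConfigMapConditionally(dest, src, func() (bool, error) {
+//		return featureEnabled(), nil // featureEnabled is illustrative
+//	})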
+func (c *ResourceSyncController) SyncConfigMapConditionally(destination, source ResourceLocation, preconditionsFulfilledFn preconditionsFulfilled) error { + return c.syncConfigMap(destination, source, preconditionsFulfilledFn) +} + +func (c *ResourceSyncController) syncConfigMap(destination ResourceLocation, source ResourceLocation, preconditionsFulfilledFn preconditionsFulfilled, keys ...string) error { + if !c.knownNamespaces.Has(destination.Namespace) { + return fmt.Errorf("not watching namespace %q", destination.Namespace) + } + if source != emptyResourceLocation && !c.knownNamespaces.Has(source.Namespace) { + return fmt.Errorf("not watching namespace %q", source.Namespace) + } + + c.syncRuleLock.Lock() + defer c.syncRuleLock.Unlock() + c.configMapSyncRules[destination] = syncRuleSource{ + ResourceLocation: source, + syncedKeys: sets.NewString(keys...), + preconditionsFulfilledFn: preconditionsFulfilledFn, + } + + // make sure the new rule is picked up + c.syncCtx.Queue().Add(c.syncCtx.QueueKey()) + return nil +} + +func (c *ResourceSyncController) SyncSecret(destination, source ResourceLocation) error { + return c.syncSecret(destination, source, alwaysFulfilledPreconditions) +} + +func (c *ResourceSyncController) SyncPartialSecret(destination, source ResourceLocation, keys ...string) error { + return c.syncSecret(destination, source, alwaysFulfilledPreconditions, keys...) +} + +// SyncSecretConditionally adds a new secret that the resource sync controller +// will synchronise if the given precondition is fulfilled. +func (c *ResourceSyncController) SyncSecretConditionally(destination, source ResourceLocation, preconditionsFulfilledFn preconditionsFulfilled) error { + return c.syncSecret(destination, source, preconditionsFulfilledFn) +} + +func (c *ResourceSyncController) syncSecret(destination, source ResourceLocation, preconditionsFulfilledFn preconditionsFulfilled, keys ...string) error { + if !c.knownNamespaces.Has(destination.Namespace) { + return fmt.Errorf("not watching namespace %q", destination.Namespace) + } + if source != emptyResourceLocation && !c.knownNamespaces.Has(source.Namespace) { + return fmt.Errorf("not watching namespace %q", source.Namespace) + } + + c.syncRuleLock.Lock() + defer c.syncRuleLock.Unlock() + c.secretSyncRules[destination] = syncRuleSource{ + ResourceLocation: source, + syncedKeys: sets.NewString(keys...), + preconditionsFulfilledFn: preconditionsFulfilledFn, + } + + // make sure the new rule is picked up + c.syncCtx.Queue().Add(c.syncCtx.QueueKey()) + return nil +} + +// errorWithProvider provides a finger of blame in case a source resource cannot be retrieved. 
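+// With Provider set, the wrapped error reads, e.g. (provider name illustrative):
+//
+//	secrets "foo" not found (check the "kube-apiserver-operator" that is supposed to provide this resource)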
+func errorWithProvider(provider string, err error) error { + if len(provider) > 0 { + return fmt.Errorf("%w (check the %q that is supposed to provide this resource)", err, provider) + } + return err +} + +func (c *ResourceSyncController) Sync(ctx context.Context, syncCtx factory.SyncContext) error { + operatorSpec, _, _, err := c.operatorConfigClient.GetOperatorState() + if err != nil { + return err + } + + if !management.IsOperatorManaged(operatorSpec.ManagementState) { + return nil + } + + c.syncRuleLock.RLock() + defer c.syncRuleLock.RUnlock() + + errors := []error{} + + for destination, source := range c.configMapSyncRules { + // skip the sync if the preconditions aren't fulfilled + if fulfilled, err := source.preconditionsFulfilledFn(); !fulfilled || err != nil { + if err != nil { + errors = append(errors, err) + } + continue + } + + if source.ResourceLocation == emptyResourceLocation { + // use the cache to check whether the configmap exists in target namespace, if not skip the extra delete call. + if _, err := c.configMapGetter.ConfigMaps(destination.Namespace).Get(ctx, destination.Name, metav1.GetOptions{}); err != nil { + if !apierrors.IsNotFound(err) { + errors = append(errors, err) + } + continue + } + if err := c.configMapGetter.ConfigMaps(destination.Namespace).Delete(ctx, destination.Name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { + errors = append(errors, err) + } + continue + } + + _, _, err := resourceapply.SyncPartialConfigMap(ctx, c.configMapGetter, syncCtx.Recorder(), source.Namespace, source.Name, destination.Namespace, destination.Name, source.syncedKeys, []metav1.OwnerReference{}) + if err != nil { + errors = append(errors, errorWithProvider(source.Provider, err)) + } + } + for destination, source := range c.secretSyncRules { + // skip the sync if the preconditions aren't fulfilled + if fulfilled, err := source.preconditionsFulfilledFn(); !fulfilled || err != nil { + if err != nil { + errors = append(errors, err) + } + continue + } + + if source.ResourceLocation == emptyResourceLocation { + // use the cache to check whether the secret exists in target namespace, if not skip the extra delete call. 
+ if _, err := c.secretGetter.Secrets(destination.Namespace).Get(ctx, destination.Name, metav1.GetOptions{}); err != nil { + if !apierrors.IsNotFound(err) { + errors = append(errors, err) + } + continue + } + if err := c.secretGetter.Secrets(destination.Namespace).Delete(ctx, destination.Name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) { + errors = append(errors, err) + } + continue + } + + _, _, err := resourceapply.SyncPartialSecret(ctx, c.secretGetter, syncCtx.Recorder(), source.Namespace, source.Name, destination.Namespace, destination.Name, source.syncedKeys, []metav1.OwnerReference{}) + if err != nil { + errors = append(errors, errorWithProvider(source.Provider, err)) + } + } + + if len(errors) > 0 { + cond := operatorv1.OperatorCondition{ + Type: condition.ResourceSyncControllerDegradedConditionType, + Status: operatorv1.ConditionTrue, + Reason: "Error", + Message: v1helpers.NewMultiLineAggregate(errors).Error(), + } + if _, _, updateError := v1helpers.UpdateStatus(ctx, c.operatorConfigClient, v1helpers.UpdateConditionFn(cond)); updateError != nil { + return updateError + } + return nil + } + + cond := operatorv1.OperatorCondition{ + Type: condition.ResourceSyncControllerDegradedConditionType, + Status: operatorv1.ConditionFalse, + } + if _, _, updateError := v1helpers.UpdateStatus(ctx, c.operatorConfigClient, v1helpers.UpdateConditionFn(cond)); updateError != nil { + return updateError + } + return nil +} + +func NewDebugHandler(controller *ResourceSyncController) http.Handler { + return &debugHTTPHandler{controller: controller} +} + +type debugHTTPHandler struct { + controller *ResourceSyncController +} + +type ResourceSyncRule struct { + Destination ResourceLocation `json:"destination"` + Source syncRuleSource `json:"source"` +} + +type ResourceSyncRuleList []ResourceSyncRule + +func (l ResourceSyncRuleList) Len() int { return len(l) } +func (l ResourceSyncRuleList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l ResourceSyncRuleList) Less(i, j int) bool { + if strings.Compare(l[i].Source.Namespace, l[j].Source.Namespace) < 0 { + return true + } + if strings.Compare(l[i].Source.Namespace, l[j].Source.Namespace) > 0 { + return false + } + if strings.Compare(l[i].Source.Name, l[j].Source.Name) < 0 { + return true + } + return false +} + +type ControllerSyncRules struct { + Secrets ResourceSyncRuleList `json:"secrets"` + Configs ResourceSyncRuleList `json:"configs"` +} + +// ServeSyncRules provides a handler function to return the sync rules of the controller +func (h *debugHTTPHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + syncRules := ControllerSyncRules{ResourceSyncRuleList{}, ResourceSyncRuleList{}} + + h.controller.syncRuleLock.RLock() + defer h.controller.syncRuleLock.RUnlock() + syncRules.Secrets = append(syncRules.Secrets, resourceSyncRuleList(h.controller.secretSyncRules)...) + syncRules.Configs = append(syncRules.Configs, resourceSyncRuleList(h.controller.configMapSyncRules)...) 
+
+	data, err := json.Marshal(syncRules)
+	if err != nil {
+		// write the header before the body: once Write is called, the status is fixed at 200
+		w.WriteHeader(http.StatusInternalServerError)
+		w.Write([]byte(err.Error()))
+		return
+	}
+	w.WriteHeader(http.StatusOK)
+	w.Write(data)
+}
+
+func resourceSyncRuleList(syncRules syncRules) ResourceSyncRuleList {
+	rules := make(ResourceSyncRuleList, 0, len(syncRules))
+	for dest, src := range syncRules {
+		rule := ResourceSyncRule{
+			Source:      src,
+			Destination: dest,
+		}
+		rules = append(rules, rule)
+	}
+	sort.Sort(rules)
+	return rules
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/args.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/args.go
new file mode 100644
index 000000000..e1a165e63
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/args.go
@@ -0,0 +1,61 @@
+package v1helpers
+
+import (
+	"fmt"
+	"sort"
+
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+)
+
+// FlagsFromUnstructured processes the unstructured arguments usually retrieved from an operator's configuration file under a specific key.
+// There are only two supported/valid types for arguments, that is []string and/or string.
+// Passing a different type yields an error.
+//
+// Use the ToFlagSlice function to get a slice of string flags.
+func FlagsFromUnstructured(unstructuredArgs map[string]interface{}) (map[string][]string, error) {
+	return flagsFromUnstructured(unstructuredArgs)
+}
+
+// ToFlagSlice transforms the provided arguments to a slice of string flags.
+// A flag name is taken directly from the key and the value is simply attached.
+// A flag is repeated iff it has more than one value.
+func ToFlagSlice(args map[string][]string) []string {
+	var keys []string
+	for key := range args {
+		keys = append(keys, key)
+	}
+	sort.Strings(keys)
+
+	var flags []string
+	for _, key := range keys {
+		for _, token := range args[key] {
+			flags = append(flags, fmt.Sprintf("--%s=%s", key, token))
+		}
+	}
+	return flags
+}
+
+// flagsFromUnstructured processes the unstructured arguments (interface{}) to a map of strings.
+// There are only two supported/valid types for arguments, that is []string and/or string.
+// Passing a different type yields an error.
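+//
+// For example (hypothetical input):
+//
+//	args := map[string]interface{}{
+//		"v":             "2",
+//		"feature-gates": []interface{}{"FeatureA=true", "FeatureB=false"},
+//	}
+//	parsed, _ := flagsFromUnstructured(args) // map[feature-gates:[FeatureA=true FeatureB=false] v:[2]]
+//	_ = ToFlagSlice(parsed)                  // [--feature-gates=FeatureA=true --feature-gates=FeatureB=false --v=2]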
+func flagsFromUnstructured(unstructuredArgs map[string]interface{}) (map[string][]string, error) { + ret := map[string][]string{} + for argName, argRawValue := range unstructuredArgs { + var argsSlice []string + var found bool + var err error + + argsSlice, found, err = unstructured.NestedStringSlice(unstructuredArgs, argName) + if !found || err != nil { + str, found, err := unstructured.NestedString(unstructuredArgs, argName) + if !found || err != nil { + return nil, fmt.Errorf("unable to process an argument, incorrect value %v under %v key, expected []string or string", argRawValue, argName) + } + argsSlice = append(argsSlice, str) + } + + ret[argName] = argsSlice + } + + return ret, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/core_getters.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/core_getters.go new file mode 100644 index 000000000..bdfe17d92 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/core_getters.go @@ -0,0 +1,127 @@ +package v1helpers + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + corev1client "k8s.io/client-go/kubernetes/typed/core/v1" + corev1listers "k8s.io/client-go/listers/core/v1" +) + +var ( + emptyGetOptions = metav1.GetOptions{} + emptyListOptions = metav1.ListOptions{} +) + +type combinedConfigMapGetter struct { + client corev1client.ConfigMapsGetter + listers KubeInformersForNamespaces +} + +func CachedConfigMapGetter(client corev1client.ConfigMapsGetter, listers KubeInformersForNamespaces) corev1client.ConfigMapsGetter { + return &combinedConfigMapGetter{ + client: client, + listers: listers, + } +} + +type combinedConfigMapInterface struct { + corev1client.ConfigMapInterface + lister corev1listers.ConfigMapNamespaceLister + namespace string +} + +func (g combinedConfigMapGetter) ConfigMaps(namespace string) corev1client.ConfigMapInterface { + return combinedConfigMapInterface{ + ConfigMapInterface: g.client.ConfigMaps(namespace), + lister: g.listers.InformersFor(namespace).Core().V1().ConfigMaps().Lister().ConfigMaps(namespace), + namespace: namespace, + } +} + +func (g combinedConfigMapInterface) Get(_ context.Context, name string, options metav1.GetOptions) (*corev1.ConfigMap, error) { + if !equality.Semantic.DeepEqual(options, emptyGetOptions) { + return nil, fmt.Errorf("GetOptions are not honored by cached client: %#v", options) + } + + ret, err := g.lister.Get(name) + if err != nil { + return nil, err + } + return ret.DeepCopy(), nil +} +func (g combinedConfigMapInterface) List(_ context.Context, options metav1.ListOptions) (*corev1.ConfigMapList, error) { + if !equality.Semantic.DeepEqual(options, emptyListOptions) { + return nil, fmt.Errorf("ListOptions are not honored by cached client: %#v", options) + } + + list, err := g.lister.List(labels.Everything()) + if err != nil { + return nil, err + } + + ret := &corev1.ConfigMapList{} + for i := range list { + ret.Items = append(ret.Items, *(list[i].DeepCopy())) + } + return ret, nil +} + +type combinedSecretGetter struct { + client corev1client.SecretsGetter + listers KubeInformersForNamespaces +} + +func CachedSecretGetter(client corev1client.SecretsGetter, listers KubeInformersForNamespaces) corev1client.SecretsGetter { + return &combinedSecretGetter{ + client: client, + listers: listers, + } +} + +type combinedSecretInterface struct { + corev1client.SecretInterface + lister 
corev1listers.SecretNamespaceLister + namespace string +} + +func (g combinedSecretGetter) Secrets(namespace string) corev1client.SecretInterface { + return combinedSecretInterface{ + SecretInterface: g.client.Secrets(namespace), + lister: g.listers.InformersFor(namespace).Core().V1().Secrets().Lister().Secrets(namespace), + namespace: namespace, + } +} + +func (g combinedSecretInterface) Get(_ context.Context, name string, options metav1.GetOptions) (*corev1.Secret, error) { + if !equality.Semantic.DeepEqual(options, emptyGetOptions) { + return nil, fmt.Errorf("GetOptions are not honored by cached client: %#v", options) + } + + ret, err := g.lister.Get(name) + if err != nil { + return nil, err + } + return ret.DeepCopy(), nil +} + +func (g combinedSecretInterface) List(_ context.Context, options metav1.ListOptions) (*corev1.SecretList, error) { + if !equality.Semantic.DeepEqual(options, emptyListOptions) { + return nil, fmt.Errorf("ListOptions are not honored by cached client: %#v", options) + } + + list, err := g.lister.List(labels.Everything()) + if err != nil { + return nil, err + } + + ret := &corev1.SecretList{} + for i := range list { + ret.Items = append(ret.Items, *(list[i].DeepCopy())) + } + return ret, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/fake_informers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/fake_informers.go new file mode 100644 index 000000000..893332897 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/fake_informers.go @@ -0,0 +1,7 @@ +package v1helpers + +import "k8s.io/client-go/informers" + +func NewFakeKubeInformersForNamespaces(informers map[string]informers.SharedInformerFactory) KubeInformersForNamespaces { + return kubeInformersForNamespaces(informers) +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go new file mode 100644 index 000000000..f0f2958d2 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go @@ -0,0 +1,485 @@ +package v1helpers + +import ( + "context" + "errors" + "fmt" + "os" + "sort" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/equality" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/client-go/util/retry" + + "github.com/ghodss/yaml" + + configv1 "github.com/openshift/api/config/v1" + operatorv1 "github.com/openshift/api/operator/v1" +) + +// SetOperandVersion sets the new version and returns the previous value. 
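+// For example (hypothetical values):
+//
+//	versions := []configv1.OperandVersion{{Name: "operator", Version: "4.13.0"}}
+//	previous := SetOperandVersion(&versions, configv1.OperandVersion{Name: "operator", Version: "4.14.0"})
+//	// previous == "4.13.0"; versions now carries "4.14.0"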
+func SetOperandVersion(versions *[]configv1.OperandVersion, operandVersion configv1.OperandVersion) string {
+	if versions == nil {
+		versions = &[]configv1.OperandVersion{}
+	}
+	existingVersion := FindOperandVersion(*versions, operandVersion.Name)
+	if existingVersion == nil {
+		*versions = append(*versions, operandVersion)
+		return ""
+	}
+
+	previous := existingVersion.Version
+	existingVersion.Version = operandVersion.Version
+	return previous
+}
+
+func FindOperandVersion(versions []configv1.OperandVersion, name string) *configv1.OperandVersion {
+	if versions == nil {
+		return nil
+	}
+	for i := range versions {
+		if versions[i].Name == name {
+			return &versions[i]
+		}
+	}
+	return nil
+}
+
+func SetOperatorCondition(conditions *[]operatorv1.OperatorCondition, newCondition operatorv1.OperatorCondition) {
+	if conditions == nil {
+		conditions = &[]operatorv1.OperatorCondition{}
+	}
+	existingCondition := FindOperatorCondition(*conditions, newCondition.Type)
+	if existingCondition == nil {
+		newCondition.LastTransitionTime = metav1.NewTime(time.Now())
+		*conditions = append(*conditions, newCondition)
+		return
+	}
+
+	if existingCondition.Status != newCondition.Status {
+		existingCondition.Status = newCondition.Status
+		existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
+	}
+
+	existingCondition.Reason = newCondition.Reason
+	existingCondition.Message = newCondition.Message
+}
+
+func RemoveOperatorCondition(conditions *[]operatorv1.OperatorCondition, conditionType string) {
+	if conditions == nil {
+		conditions = &[]operatorv1.OperatorCondition{}
+	}
+	newConditions := []operatorv1.OperatorCondition{}
+	for _, condition := range *conditions {
+		if condition.Type != conditionType {
+			newConditions = append(newConditions, condition)
+		}
+	}
+
+	*conditions = newConditions
+}
+
+func FindOperatorCondition(conditions []operatorv1.OperatorCondition, conditionType string) *operatorv1.OperatorCondition {
+	for i := range conditions {
+		if conditions[i].Type == conditionType {
+			return &conditions[i]
+		}
+	}
+
+	return nil
+}
+
+func IsOperatorConditionTrue(conditions []operatorv1.OperatorCondition, conditionType string) bool {
+	return IsOperatorConditionPresentAndEqual(conditions, conditionType, operatorv1.ConditionTrue)
+}
+
+func IsOperatorConditionFalse(conditions []operatorv1.OperatorCondition, conditionType string) bool {
+	return IsOperatorConditionPresentAndEqual(conditions, conditionType, operatorv1.ConditionFalse)
+}
+
+func IsOperatorConditionPresentAndEqual(conditions []operatorv1.OperatorCondition, conditionType string, status operatorv1.ConditionStatus) bool {
+	for _, condition := range conditions {
+		if condition.Type == conditionType {
+			return condition.Status == status
+		}
+	}
+	return false
+}
+
+// UpdateOperatorSpecFunc is a func that mutates an operator spec.
+type UpdateOperatorSpecFunc func(spec *operatorv1.OperatorSpec) error
+
+// UpdateSpec applies the update funcs to the oldSpec and tries to update via the client.
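+//
+// A typical call site (sketch; operatorClient and observed are illustrative):
+//
+//	spec, updated, err := UpdateSpec(ctx, operatorClient,
+//		UpdateObservedConfigFn(observed),
+//	)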
+func UpdateSpec(ctx context.Context, client OperatorClient, updateFuncs ...UpdateOperatorSpecFunc) (*operatorv1.OperatorSpec, bool, error) {
+	updated := false
+	var operatorSpec *operatorv1.OperatorSpec
+	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		oldSpec, _, resourceVersion, err := client.GetOperatorState()
+		if err != nil {
+			return err
+		}
+
+		newSpec := oldSpec.DeepCopy()
+		for _, update := range updateFuncs {
+			if err := update(newSpec); err != nil {
+				return err
+			}
+		}
+
+		if equality.Semantic.DeepEqual(oldSpec, newSpec) {
+			return nil
+		}
+
+		operatorSpec, _, err = client.UpdateOperatorSpec(ctx, resourceVersion, newSpec)
+		updated = err == nil
+		return err
+	})
+
+	return operatorSpec, updated, err
+}
+
+// UpdateObservedConfigFn returns a func to update the observed config.
+func UpdateObservedConfigFn(config map[string]interface{}) UpdateOperatorSpecFunc {
+	return func(oldSpec *operatorv1.OperatorSpec) error {
+		oldSpec.ObservedConfig = runtime.RawExtension{Object: &unstructured.Unstructured{Object: config}}
+		return nil
+	}
+}
+
+// UpdateStatusFunc is a func that mutates an operator status.
+type UpdateStatusFunc func(status *operatorv1.OperatorStatus) error
+
+// UpdateStatus applies the update funcs to the oldStatus and tries to update via the client.
+func UpdateStatus(ctx context.Context, client OperatorClient, updateFuncs ...UpdateStatusFunc) (*operatorv1.OperatorStatus, bool, error) {
+	updated := false
+	var updatedOperatorStatus *operatorv1.OperatorStatus
+	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		_, oldStatus, resourceVersion, err := client.GetOperatorState()
+		if err != nil {
+			return err
+		}
+
+		newStatus := oldStatus.DeepCopy()
+		for _, update := range updateFuncs {
+			if err := update(newStatus); err != nil {
+				return err
+			}
+		}
+
+		if equality.Semantic.DeepEqual(oldStatus, newStatus) {
+			// We return the newStatus which is a deep copy of oldStatus but with all update funcs applied.
+			updatedOperatorStatus = newStatus
+			return nil
+		}
+
+		updatedOperatorStatus, err = client.UpdateOperatorStatus(ctx, resourceVersion, newStatus)
+		updated = err == nil
+		return err
+	})
+
+	return updatedOperatorStatus, updated, err
+}
+
+// UpdateConditionFn returns a func to update a condition.
+func UpdateConditionFn(cond operatorv1.OperatorCondition) UpdateStatusFunc {
+	return func(oldStatus *operatorv1.OperatorStatus) error {
+		SetOperatorCondition(&oldStatus.Conditions, cond)
+		return nil
+	}
+}
+
+// UpdateStaticPodStatusFunc is a func that mutates a static pod operator status.
+type UpdateStaticPodStatusFunc func(status *operatorv1.StaticPodOperatorStatus) error
+
+// UpdateStaticPodStatus applies the update funcs to the oldStatus and tries to update via the client.
+func UpdateStaticPodStatus(ctx context.Context, client StaticPodOperatorClient, updateFuncs ...UpdateStaticPodStatusFunc) (*operatorv1.StaticPodOperatorStatus, bool, error) {
+	updated := false
+	var updatedOperatorStatus *operatorv1.StaticPodOperatorStatus
+	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		_, oldStatus, resourceVersion, err := client.GetStaticPodOperatorState()
+		if err != nil {
+			return err
+		}
+
+		newStatus := oldStatus.DeepCopy()
+		for _, update := range updateFuncs {
+			if err := update(newStatus); err != nil {
+				return err
+			}
+		}
+
+		if equality.Semantic.DeepEqual(oldStatus, newStatus) {
+			// We return the newStatus which is a deep copy of oldStatus but with all update funcs applied.
+			updatedOperatorStatus = newStatus
+			return nil
+		}
+
+		updatedOperatorStatus, err = client.UpdateStaticPodOperatorStatus(ctx, resourceVersion, newStatus)
+		updated = err == nil
+		return err
+	})
+
+	return updatedOperatorStatus, updated, err
+}
+
+// UpdateStaticPodConditionFn returns a func to update a condition.
+func UpdateStaticPodConditionFn(cond operatorv1.OperatorCondition) UpdateStaticPodStatusFunc {
+	return func(oldStatus *operatorv1.StaticPodOperatorStatus) error {
+		SetOperatorCondition(&oldStatus.Conditions, cond)
+		return nil
+	}
+}
+
+// EnsureFinalizer adds a new finalizer to the operator CR, if it does not exist. No-op otherwise.
+// The finalizer name is computed from the controller name and operator name ($OPERATOR_NAME or os.Args[0]).
+// It retries on conflicts.
+func EnsureFinalizer(ctx context.Context, client OperatorClientWithFinalizers, controllerName string) error {
+	finalizer := getFinalizerName(controllerName)
+	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		return client.EnsureFinalizer(ctx, finalizer)
+	})
+	return err
+}
+
+// RemoveFinalizer removes a finalizer from the operator CR, if it is there. No-op otherwise.
+// The finalizer name is computed from the controller name and operator name ($OPERATOR_NAME or os.Args[0]).
+// It retries on conflicts.
+func RemoveFinalizer(ctx context.Context, client OperatorClientWithFinalizers, controllerName string) error {
+	finalizer := getFinalizerName(controllerName)
+	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		return client.RemoveFinalizer(ctx, finalizer)
+	})
+	return err
+}
+
+// getFinalizerName computes a nice finalizer name from controllerName and the operator name ($OPERATOR_NAME or os.Args[0]).
+func getFinalizerName(controllerName string) string {
+	return fmt.Sprintf("%s.operator.openshift.io/%s", getOperatorName(), controllerName)
+}
+
+func getOperatorName() string {
+	if name := os.Getenv("OPERATOR_NAME"); name != "" {
+		return name
+	}
+	return os.Args[0]
+}
+
+type aggregate []error
+
+var _ utilerrors.Aggregate = aggregate{}
+
+// NewMultiLineAggregate returns an aggregate error with multi-line output
+func NewMultiLineAggregate(errList []error) error {
+	var errs []error
+	for _, e := range errList {
+		if e != nil {
+			errs = append(errs, e)
+		}
+	}
+	if len(errs) == 0 {
+		return nil
+	}
+	return aggregate(errs)
+}
+
+// Error is part of the error interface.
+func (agg aggregate) Error() string {
+	msgs := make([]string, len(agg))
+	for i := range agg {
+		msgs[i] = agg[i].Error()
+	}
+	return strings.Join(msgs, "\n")
+}
+
+// Errors is part of the Aggregate interface.
+func (agg aggregate) Errors() []error { + return []error(agg) +} + +// Is is part of the Aggregate interface +func (agg aggregate) Is(target error) bool { + return agg.visit(func(err error) bool { + return errors.Is(err, target) + }) +} + +func (agg aggregate) visit(f func(err error) bool) bool { + for _, err := range agg { + switch err := err.(type) { + case aggregate: + if match := err.visit(f); match { + return match + } + case utilerrors.Aggregate: + for _, nestedErr := range err.Errors() { + if match := f(nestedErr); match { + return match + } + } + default: + if match := f(err); match { + return match + } + } + } + + return false +} + +// MapToEnvVars converts a string-string map to a slice of corev1.EnvVar-s +func MapToEnvVars(mapEnvVars map[string]string) []corev1.EnvVar { + if mapEnvVars == nil { + return nil + } + + envVars := make([]corev1.EnvVar, len(mapEnvVars)) + i := 0 + for k, v := range mapEnvVars { + envVars[i] = corev1.EnvVar{Name: k, Value: v} + i++ + } + + // need to sort the slice so that kube-controller-manager-pod configmap does not change all the time + sort.Slice(envVars, func(i, j int) bool { return envVars[i].Name < envVars[j].Name }) + return envVars +} + +// InjectObservedProxyIntoContainers injects proxy environment variables in containers specified in containerNames. +func InjectObservedProxyIntoContainers(podSpec *corev1.PodSpec, containerNames []string, observedConfig []byte, fields ...string) error { + var config map[string]interface{} + if err := yaml.Unmarshal(observedConfig, &config); err != nil { + return fmt.Errorf("failed to unmarshal the observedConfig: %w", err) + } + + proxyConfig, found, err := unstructured.NestedStringMap(config, fields...) + if err != nil { + return fmt.Errorf("couldn't get the proxy config from observedConfig: %w", err) + } + + proxyEnvVars := MapToEnvVars(proxyConfig) + if !found || len(proxyEnvVars) < 1 { + // There's no observed proxy config, we should tolerate that + return nil + } + + for _, containerName := range containerNames { + for i := range podSpec.InitContainers { + if podSpec.InitContainers[i].Name == containerName { + podSpec.InitContainers[i].Env = append(podSpec.InitContainers[i].Env, proxyEnvVars...) + } + } + for i := range podSpec.Containers { + if podSpec.Containers[i].Name == containerName { + podSpec.Containers[i].Env = append(podSpec.Containers[i].Env, proxyEnvVars...) 
+ } + } + } + + return nil +} + +func InjectTrustedCAIntoContainers(podSpec *corev1.PodSpec, configMapName string, containerNames []string) error { + podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{ + Name: "non-standard-root-system-trust-ca-bundle", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: configMapName, + }, + Items: []corev1.KeyToPath{ + {Key: "ca-bundle.crt", Path: "tls-ca-bundle.pem"}, + }, + }, + }, + }) + + for _, containerName := range containerNames { + for i := range podSpec.InitContainers { + if podSpec.InitContainers[i].Name == containerName { + podSpec.InitContainers[i].VolumeMounts = append(podSpec.InitContainers[i].VolumeMounts, corev1.VolumeMount{ + Name: "non-standard-root-system-trust-ca-bundle", + MountPath: "/etc/pki/ca-trust/extracted/pem", + ReadOnly: true, + }) + } + } + for i := range podSpec.Containers { + if podSpec.Containers[i].Name == containerName { + podSpec.Containers[i].VolumeMounts = append(podSpec.Containers[i].VolumeMounts, corev1.VolumeMount{ + Name: "non-standard-root-system-trust-ca-bundle", + MountPath: "/etc/pki/ca-trust/extracted/pem", + ReadOnly: true, + }) + } + } + } + + return nil +} + +func SetCondition(conditions *[]metav1.Condition, newCondition metav1.Condition) { + if conditions == nil { + conditions = &[]metav1.Condition{} + } + existingCondition := FindCondition(*conditions, newCondition.Type) + if existingCondition == nil { + newCondition.LastTransitionTime = metav1.NewTime(time.Now()) + *conditions = append(*conditions, newCondition) + return + } + + if existingCondition.Status != newCondition.Status { + existingCondition.Status = newCondition.Status + existingCondition.LastTransitionTime = metav1.NewTime(time.Now()) + } + + existingCondition.Reason = newCondition.Reason + existingCondition.Message = newCondition.Message +} + +func RemoveCondition(conditions *[]metav1.Condition, conditionType string) { + if conditions == nil { + conditions = &[]metav1.Condition{} + } + newConditions := []metav1.Condition{} + for _, condition := range *conditions { + if condition.Type != conditionType { + newConditions = append(newConditions, condition) + } + } + + *conditions = newConditions +} + +func FindCondition(conditions []metav1.Condition, conditionType string) *metav1.Condition { + for i := range conditions { + if conditions[i].Type == conditionType { + return &conditions[i] + } + } + + return nil +} + +func IsConditionTrue(conditions []metav1.Condition, conditionType string) bool { + return IsConditionPresentAndEqual(conditions, conditionType, metav1.ConditionTrue) +} + +func IsConditionFalse(conditions []metav1.Condition, conditionType string) bool { + return IsConditionPresentAndEqual(conditions, conditionType, metav1.ConditionFalse) +} + +func IsConditionPresentAndEqual(conditions []metav1.Condition, conditionType string, status metav1.ConditionStatus) bool { + for _, condition := range conditions { + if condition.Type == conditionType { + return condition.Status == status + } + } + return false +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go new file mode 100644 index 000000000..ba3769252 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/informers.go @@ -0,0 +1,135 @@ +package v1helpers + +import ( + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" + 
"k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" + corev1listers "k8s.io/client-go/listers/core/v1" +) + +// KubeInformersForNamespaces is a simple way to combine several shared informers into a single struct with unified listing power +type KubeInformersForNamespaces interface { + Start(stopCh <-chan struct{}) + InformersFor(namespace string) informers.SharedInformerFactory + Namespaces() sets.String + + ConfigMapLister() corev1listers.ConfigMapLister + SecretLister() corev1listers.SecretLister + + // Used in by workloads controller and controllers that report deployment pods status + PodLister() corev1listers.PodLister +} + +var _ KubeInformersForNamespaces = kubeInformersForNamespaces{} + +func NewKubeInformersForNamespaces(kubeClient kubernetes.Interface, namespaces ...string) KubeInformersForNamespaces { + ret := kubeInformersForNamespaces{} + for _, namespace := range namespaces { + if len(namespace) == 0 { + ret[""] = informers.NewSharedInformerFactory(kubeClient, 10*time.Minute) + continue + } + ret[namespace] = informers.NewSharedInformerFactoryWithOptions(kubeClient, 10*time.Minute, informers.WithNamespace(namespace)) + } + + return ret +} + +type kubeInformersForNamespaces map[string]informers.SharedInformerFactory + +func (i kubeInformersForNamespaces) Start(stopCh <-chan struct{}) { + for _, informer := range i { + informer.Start(stopCh) + } +} + +func (i kubeInformersForNamespaces) Namespaces() sets.String { + return sets.StringKeySet(i) +} +func (i kubeInformersForNamespaces) InformersFor(namespace string) informers.SharedInformerFactory { + return i[namespace] +} + +func (i kubeInformersForNamespaces) HasInformersFor(namespace string) bool { + return i.InformersFor(namespace) != nil +} + +type configMapLister kubeInformersForNamespaces + +func (i kubeInformersForNamespaces) ConfigMapLister() corev1listers.ConfigMapLister { + return configMapLister(i) +} + +func (l configMapLister) List(selector labels.Selector) (ret []*corev1.ConfigMap, err error) { + globalInformer, ok := l[""] + if !ok { + return nil, fmt.Errorf("combinedLister does not support cross namespace list") + } + + return globalInformer.Core().V1().ConfigMaps().Lister().List(selector) +} + +func (l configMapLister) ConfigMaps(namespace string) corev1listers.ConfigMapNamespaceLister { + informer, ok := l[namespace] + if !ok { + // coding error + panic(fmt.Sprintf("namespace %q is missing", namespace)) + } + + return informer.Core().V1().ConfigMaps().Lister().ConfigMaps(namespace) +} + +type secretLister kubeInformersForNamespaces + +func (i kubeInformersForNamespaces) SecretLister() corev1listers.SecretLister { + return secretLister(i) +} + +func (l secretLister) List(selector labels.Selector) (ret []*corev1.Secret, err error) { + globalInformer, ok := l[""] + if !ok { + return nil, fmt.Errorf("combinedLister does not support cross namespace list") + } + + return globalInformer.Core().V1().Secrets().Lister().List(selector) +} + +func (l secretLister) Secrets(namespace string) corev1listers.SecretNamespaceLister { + informer, ok := l[namespace] + if !ok { + // coding error + panic(fmt.Sprintf("namespace %q is missing", namespace)) + } + + return informer.Core().V1().Secrets().Lister().Secrets(namespace) +} + +type podLister kubeInformersForNamespaces + +func (i kubeInformersForNamespaces) PodLister() corev1listers.PodLister { + return podLister(i) +} + +func (l podLister) List(selector labels.Selector) (ret []*corev1.Pod, err error) { + globalInformer, ok := l[""] 
+	if !ok {
+		return nil, fmt.Errorf("combinedLister does not support cross namespace list")
+	}
+
+	return globalInformer.Core().V1().Pods().Lister().List(selector)
+}
+
+func (l podLister) Pods(namespace string) corev1listers.PodNamespaceLister {
+	informer, ok := l[namespace]
+	if !ok {
+		// coding error
+		panic(fmt.Sprintf("namespace %q is missing", namespace))
+	}
+
+	return informer.Core().V1().Pods().Lister().Pods(namespace)
+}
diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go
new file mode 100644
index 000000000..d61d30294
--- /dev/null
+++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/interfaces.go
@@ -0,0 +1,43 @@
+package v1helpers
+
+import (
+	"context"
+
+	operatorv1 "github.com/openshift/api/operator/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/tools/cache"
+)
+
+type OperatorClient interface {
+	Informer() cache.SharedIndexInformer
+	// GetObjectMeta returns the operator metadata.
+	GetObjectMeta() (meta *metav1.ObjectMeta, err error)
+	// GetOperatorState returns the operator spec, status and the resource version, potentially from a lister.
+	GetOperatorState() (spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, resourceVersion string, err error)
+	// UpdateOperatorSpec updates the spec of the operator, assuming the given resource version.
+	UpdateOperatorSpec(ctx context.Context, oldResourceVersion string, in *operatorv1.OperatorSpec) (out *operatorv1.OperatorSpec, newResourceVersion string, err error)
+	// UpdateOperatorStatus updates the status of the operator, assuming the given resource version.
+	UpdateOperatorStatus(ctx context.Context, oldResourceVersion string, in *operatorv1.OperatorStatus) (out *operatorv1.OperatorStatus, err error)
+}
+
+type StaticPodOperatorClient interface {
+	OperatorClient
+	// GetStaticPodOperatorState returns the static pod operator spec, status and the resource version,
+	// potentially from a lister.
+	GetStaticPodOperatorState() (spec *operatorv1.StaticPodOperatorSpec, status *operatorv1.StaticPodOperatorStatus, resourceVersion string, err error)
+	// GetStaticPodOperatorStateWithQuorum returns the static pod operator spec, status and resource version
+	// directly from a server read.
+	GetStaticPodOperatorStateWithQuorum(ctx context.Context) (spec *operatorv1.StaticPodOperatorSpec, status *operatorv1.StaticPodOperatorStatus, resourceVersion string, err error)
+	// UpdateStaticPodOperatorStatus updates the status, assuming the given resource version.
+	UpdateStaticPodOperatorStatus(ctx context.Context, resourceVersion string, in *operatorv1.StaticPodOperatorStatus) (out *operatorv1.StaticPodOperatorStatus, err error)
+	// UpdateStaticPodOperatorSpec updates the spec, assuming the given resource version.
+	UpdateStaticPodOperatorSpec(ctx context.Context, resourceVersion string, in *operatorv1.StaticPodOperatorSpec) (out *operatorv1.StaticPodOperatorSpec, newResourceVersion string, err error)
+}
+
+type OperatorClientWithFinalizers interface {
+	OperatorClient
+	// EnsureFinalizer adds a new finalizer to the operator CR, if it does not exist. No-op otherwise.
+	EnsureFinalizer(ctx context.Context, finalizer string) error
+	// RemoveFinalizer removes a finalizer from the operator CR, if it is there. No-op otherwise.
+ RemoveFinalizer(ctx context.Context, finalizer string) error +} diff --git a/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go new file mode 100644 index 000000000..004adc2be --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/operator/v1helpers/test_helpers.go @@ -0,0 +1,302 @@ +package v1helpers + +import ( + "context" + "fmt" + "strconv" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes" + corev1listers "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/tools/cache" + + operatorv1 "github.com/openshift/api/operator/v1" +) + +// NewFakeSharedIndexInformer returns a fake shared index informer, suitable to use in static pod controller unit tests. +func NewFakeSharedIndexInformer() cache.SharedIndexInformer { + return &fakeSharedIndexInformer{} +} + +type fakeSharedIndexInformer struct{} + +func (i fakeSharedIndexInformer) AddEventHandler(handler cache.ResourceEventHandler) (cache.ResourceEventHandlerRegistration, error) { + return nil, nil +} + +func (i fakeSharedIndexInformer) AddEventHandlerWithResyncPeriod(handler cache.ResourceEventHandler, resyncPeriod time.Duration) (cache.ResourceEventHandlerRegistration, error) { + return nil, nil +} + +func (i fakeSharedIndexInformer) RemoveEventHandler(handle cache.ResourceEventHandlerRegistration) error { + panic("implement me") +} + +func (i fakeSharedIndexInformer) IsStopped() bool { + panic("implement me") +} + +func (fakeSharedIndexInformer) GetStore() cache.Store { + panic("implement me") +} + +func (fakeSharedIndexInformer) GetController() cache.Controller { + panic("implement me") +} + +func (fakeSharedIndexInformer) Run(stopCh <-chan struct{}) { + panic("implement me") +} + +func (fakeSharedIndexInformer) HasSynced() bool { + return true +} + +func (fakeSharedIndexInformer) LastSyncResourceVersion() string { + panic("implement me") +} + +func (fakeSharedIndexInformer) AddIndexers(indexers cache.Indexers) error { + panic("implement me") +} + +func (fakeSharedIndexInformer) GetIndexer() cache.Indexer { + panic("implement me") +} + +func (fakeSharedIndexInformer) SetWatchErrorHandler(handler cache.WatchErrorHandler) error { + panic("implement me") +} + +func (fakeSharedIndexInformer) SetTransform(f cache.TransformFunc) error { + panic("implement me") +} + +// NewFakeStaticPodOperatorClient returns a fake operator client suitable to use in static pod controller unit tests. 
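//
// A minimal usage sketch (not from the vendored file; assumes ctx := context.Background()
// and zero-valued spec/status, with no injected update errors):
//
//	fake := NewFakeStaticPodOperatorClient(
//		&operatorv1.StaticPodOperatorSpec{},
//		&operatorv1.StaticPodOperatorStatus{},
//		nil, nil,
//	)
//	_, _, rv, _ := fake.GetStaticPodOperatorState() // resourceVersion starts at "0"
//	_, err := fake.UpdateStaticPodOperatorStatus(ctx, rv, &operatorv1.StaticPodOperatorStatus{})
//	// on success the fake bumps its resourceVersion to "1"; a stale rv yields a Conflict error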
+func NewFakeStaticPodOperatorClient( + staticPodSpec *operatorv1.StaticPodOperatorSpec, staticPodStatus *operatorv1.StaticPodOperatorStatus, + triggerStatusErr func(rv string, status *operatorv1.StaticPodOperatorStatus) error, + triggerSpecErr func(rv string, spec *operatorv1.StaticPodOperatorSpec) error) StaticPodOperatorClient { + return &fakeStaticPodOperatorClient{ + fakeStaticPodOperatorSpec: staticPodSpec, + fakeStaticPodOperatorStatus: staticPodStatus, + resourceVersion: "0", + triggerStatusUpdateError: triggerStatusErr, + triggerSpecUpdateError: triggerSpecErr, + } +} + +type fakeStaticPodOperatorClient struct { + fakeStaticPodOperatorSpec *operatorv1.StaticPodOperatorSpec + fakeStaticPodOperatorStatus *operatorv1.StaticPodOperatorStatus + resourceVersion string + triggerStatusUpdateError func(rv string, status *operatorv1.StaticPodOperatorStatus) error + triggerSpecUpdateError func(rv string, status *operatorv1.StaticPodOperatorSpec) error +} + +func (c *fakeStaticPodOperatorClient) Informer() cache.SharedIndexInformer { + return &fakeSharedIndexInformer{} + +} +func (c *fakeStaticPodOperatorClient) GetObjectMeta() (*metav1.ObjectMeta, error) { + panic("not supported") +} + +func (c *fakeStaticPodOperatorClient) GetStaticPodOperatorState() (*operatorv1.StaticPodOperatorSpec, *operatorv1.StaticPodOperatorStatus, string, error) { + return c.fakeStaticPodOperatorSpec, c.fakeStaticPodOperatorStatus, c.resourceVersion, nil +} + +func (c *fakeStaticPodOperatorClient) GetStaticPodOperatorStateWithQuorum(ctx context.Context) (*operatorv1.StaticPodOperatorSpec, *operatorv1.StaticPodOperatorStatus, string, error) { + return c.fakeStaticPodOperatorSpec, c.fakeStaticPodOperatorStatus, c.resourceVersion, nil +} + +func (c *fakeStaticPodOperatorClient) UpdateStaticPodOperatorStatus(ctx context.Context, resourceVersion string, status *operatorv1.StaticPodOperatorStatus) (*operatorv1.StaticPodOperatorStatus, error) { + if c.resourceVersion != resourceVersion { + return nil, errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion")) + } + rv, err := strconv.Atoi(resourceVersion) + if err != nil { + return nil, err + } + c.resourceVersion = strconv.Itoa(rv + 1) + if c.triggerStatusUpdateError != nil { + if err := c.triggerStatusUpdateError(resourceVersion, status); err != nil { + return nil, err + } + } + c.fakeStaticPodOperatorStatus = status + return c.fakeStaticPodOperatorStatus, nil +} + +func (c *fakeStaticPodOperatorClient) UpdateStaticPodOperatorSpec(ctx context.Context, resourceVersion string, spec *operatorv1.StaticPodOperatorSpec) (*operatorv1.StaticPodOperatorSpec, string, error) { + if c.resourceVersion != resourceVersion { + return nil, "", errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion")) + } + rv, err := strconv.Atoi(resourceVersion) + if err != nil { + return nil, "", err + } + c.resourceVersion = strconv.Itoa(rv + 1) + if c.triggerSpecUpdateError != nil { + if err := c.triggerSpecUpdateError(resourceVersion, spec); err != nil { + return nil, "", err + } + } + c.fakeStaticPodOperatorSpec = spec + return c.fakeStaticPodOperatorSpec, c.resourceVersion, nil +} + +func (c *fakeStaticPodOperatorClient) GetOperatorState() (*operatorv1.OperatorSpec, *operatorv1.OperatorStatus, string, error) { + return &c.fakeStaticPodOperatorSpec.OperatorSpec, &c.fakeStaticPodOperatorStatus.OperatorStatus, 
c.resourceVersion, nil +} +func (c *fakeStaticPodOperatorClient) UpdateOperatorSpec(ctx context.Context, s string, p *operatorv1.OperatorSpec) (spec *operatorv1.OperatorSpec, resourceVersion string, err error) { + panic("not supported") +} +func (c *fakeStaticPodOperatorClient) UpdateOperatorStatus(ctx context.Context, resourceVersion string, status *operatorv1.OperatorStatus) (*operatorv1.OperatorStatus, error) { + if c.resourceVersion != resourceVersion { + return nil, errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion")) + } + rv, err := strconv.Atoi(resourceVersion) + if err != nil { + return nil, err + } + c.resourceVersion = strconv.Itoa(rv + 1) + if c.triggerStatusUpdateError != nil { + staticPodStatus := c.fakeStaticPodOperatorStatus.DeepCopy() + staticPodStatus.OperatorStatus = *status + if err := c.triggerStatusUpdateError(resourceVersion, staticPodStatus); err != nil { + return nil, err + } + } + c.fakeStaticPodOperatorStatus.OperatorStatus = *status + return &c.fakeStaticPodOperatorStatus.OperatorStatus, nil +} + +// NewFakeNodeLister returns a fake node lister suitable to use in node controller unit test +func NewFakeNodeLister(client kubernetes.Interface) corev1listers.NodeLister { + return &fakeNodeLister{client: client} +} + +type fakeNodeLister struct { + client kubernetes.Interface +} + +func (n *fakeNodeLister) List(selector labels.Selector) ([]*corev1.Node, error) { + nodes, err := n.client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()}) + if err != nil { + return nil, err + } + ret := []*corev1.Node{} + for i := range nodes.Items { + ret = append(ret, &nodes.Items[i]) + } + return ret, nil +} + +func (n *fakeNodeLister) Get(name string) (*corev1.Node, error) { + panic("implement me") +} + +// NewFakeOperatorClient returns a fake operator client suitable to use in static pod controller unit tests. 
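//
// Sketch of the finalizer round trip this fake supports (illustrative finalizer
// name; ctx assumed to be context.Background()):
//
//	fake := NewFakeOperatorClient(&operatorv1.OperatorSpec{}, &operatorv1.OperatorStatus{}, nil)
//	_ = fake.EnsureFinalizer(ctx, "example.com/cleanup") // no-op if already present
//	_ = fake.RemoveFinalizer(ctx, "example.com/cleanup") // no-op if absent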
+func NewFakeOperatorClient(spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, triggerErr func(rv string, status *operatorv1.OperatorStatus) error) OperatorClientWithFinalizers { + return NewFakeOperatorClientWithObjectMeta(nil, spec, status, triggerErr) +} + +func NewFakeOperatorClientWithObjectMeta(meta *metav1.ObjectMeta, spec *operatorv1.OperatorSpec, status *operatorv1.OperatorStatus, triggerErr func(rv string, status *operatorv1.OperatorStatus) error) OperatorClientWithFinalizers { + return &fakeOperatorClient{ + fakeOperatorSpec: spec, + fakeOperatorStatus: status, + fakeObjectMeta: meta, + resourceVersion: "0", + triggerStatusUpdateError: triggerErr, + } +} + +type fakeOperatorClient struct { + fakeOperatorSpec *operatorv1.OperatorSpec + fakeOperatorStatus *operatorv1.OperatorStatus + fakeObjectMeta *metav1.ObjectMeta + resourceVersion string + triggerStatusUpdateError func(rv string, status *operatorv1.OperatorStatus) error +} + +func (c *fakeOperatorClient) Informer() cache.SharedIndexInformer { + return &fakeSharedIndexInformer{} +} + +func (c *fakeOperatorClient) GetObjectMeta() (*metav1.ObjectMeta, error) { + if c.fakeObjectMeta == nil { + return &metav1.ObjectMeta{}, nil + } + + return c.fakeObjectMeta, nil +} + +func (c *fakeOperatorClient) GetOperatorState() (*operatorv1.OperatorSpec, *operatorv1.OperatorStatus, string, error) { + return c.fakeOperatorSpec, c.fakeOperatorStatus, c.resourceVersion, nil +} + +func (c *fakeOperatorClient) UpdateOperatorStatus(ctx context.Context, resourceVersion string, status *operatorv1.OperatorStatus) (*operatorv1.OperatorStatus, error) { + if c.resourceVersion != resourceVersion { + return nil, errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion")) + } + rv, err := strconv.Atoi(resourceVersion) + if err != nil { + return nil, err + } + c.resourceVersion = strconv.Itoa(rv + 1) + if c.triggerStatusUpdateError != nil { + if err := c.triggerStatusUpdateError(resourceVersion, status); err != nil { + return nil, err + } + } + c.fakeOperatorStatus = status + return c.fakeOperatorStatus, nil +} + +func (c *fakeOperatorClient) UpdateOperatorSpec(ctx context.Context, resourceVersion string, spec *operatorv1.OperatorSpec) (*operatorv1.OperatorSpec, string, error) { + if c.resourceVersion != resourceVersion { + return nil, c.resourceVersion, errors.NewConflict(schema.GroupResource{Group: operatorv1.GroupName, Resource: "TestOperatorConfig"}, "instance", fmt.Errorf("invalid resourceVersion")) + } + rv, err := strconv.Atoi(resourceVersion) + if err != nil { + return nil, c.resourceVersion, err + } + c.resourceVersion = strconv.Itoa(rv + 1) + c.fakeOperatorSpec = spec + return c.fakeOperatorSpec, c.resourceVersion, nil +} + +func (c *fakeOperatorClient) EnsureFinalizer(ctx context.Context, finalizer string) error { + if c.fakeObjectMeta == nil { + c.fakeObjectMeta = &metav1.ObjectMeta{} + } + for _, f := range c.fakeObjectMeta.Finalizers { + if f == finalizer { + return nil + } + } + c.fakeObjectMeta.Finalizers = append(c.fakeObjectMeta.Finalizers, finalizer) + return nil +} + +func (c *fakeOperatorClient) RemoveFinalizer(ctx context.Context, finalizer string) error { + newFinalizers := []string{} + for _, f := range c.fakeObjectMeta.Finalizers { + if f == finalizer { + continue + } + newFinalizers = append(newFinalizers, f) + } + c.fakeObjectMeta.Finalizers = newFinalizers + return nil +} + +func (c *fakeOperatorClient) SetObjectMeta(meta 
*metav1.ObjectMeta) { + c.fakeObjectMeta = meta +} diff --git a/vendor/github.com/openshift/machine-config-operator/internal/clients/builder.go b/vendor/github.com/openshift/machine-config-operator/internal/clients/builder.go index 081556bc4..561774d0c 100644 --- a/vendor/github.com/openshift/machine-config-operator/internal/clients/builder.go +++ b/vendor/github.com/openshift/machine-config-operator/internal/clients/builder.go @@ -3,14 +3,16 @@ package clients import ( "os" - "github.com/golang/glog" + buildclientset "github.com/openshift/client-go/build/clientset/versioned" configclientset "github.com/openshift/client-go/config/clientset/versioned" + imageclientset "github.com/openshift/client-go/image/clientset/versioned" operatorclientset "github.com/openshift/client-go/operator/clientset/versioned" mcfgclientset "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned" apiext "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + "k8s.io/klog/v2" ) // Builder can create a variety of kubernetes client interface @@ -56,6 +58,14 @@ func (cb *Builder) APIExtClientOrDie(name string) apiext.Interface { return apiext.NewForConfigOrDie(rest.AddUserAgent(cb.config, name)) } +func (cb *Builder) BuildClientOrDie(name string) buildclientset.Interface { + return buildclientset.NewForConfigOrDie(rest.AddUserAgent(cb.config, name)) +} + +func (cb *Builder) ImageClientOrDie(name string) imageclientset.Interface { + return imageclientset.NewForConfigOrDie(rest.AddUserAgent(cb.config, name)) +} + // GetBuilderConfig returns a copy of the builders *rest.Config func (cb *Builder) GetBuilderConfig() *rest.Config { return rest.CopyConfig(cb.config) @@ -71,10 +81,10 @@ func NewBuilder(kubeconfig string) (*Builder, error) { } if kubeconfig != "" { - glog.V(4).Infof("Loading kube client config from path %q", kubeconfig) + klog.V(4).Infof("Loading kube client config from path %q", kubeconfig) config, err = clientcmd.BuildConfigFromFlags("", kubeconfig) } else { - glog.V(4).Infof("Using in-cluster kube client config") + klog.V(4).Infof("Using in-cluster kube client config") config, err = rest.InClusterConfig() } if err != nil { diff --git a/vendor/github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1/helpers.go b/vendor/github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1/helpers.go index 56b60a9aa..77fc8db01 100644 --- a/vendor/github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1/helpers.go +++ b/vendor/github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1/helpers.go @@ -20,13 +20,16 @@ func NewMachineConfigPoolCondition(condType MachineConfigPoolConditionType, stat // GetMachineConfigPoolCondition returns the condition with the provided type. func GetMachineConfigPoolCondition(status MachineConfigPoolStatus, condType MachineConfigPoolConditionType) *MachineConfigPoolCondition { + // in case of sync errors, return the last condition that matches, not the first + // this exists for redundancy and potential race conditions. + var LatestState *MachineConfigPoolCondition for i := range status.Conditions { c := status.Conditions[i] if c.Type == condType { - return &c + LatestState = &c } } - return nil + return LatestState } // SetMachineConfigPoolCondition updates the MachineConfigPool to include the provided condition. 
If the condition that diff --git a/vendor/github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1/register.go b/vendor/github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1/register.go index 32c4acc2f..bbafc28de 100644 --- a/vendor/github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1/register.go +++ b/vendor/github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1/register.go @@ -1,7 +1,6 @@ package v1 import ( - osev1 "github.com/openshift/api/config/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -35,8 +34,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &MachineConfigList{}, &MachineConfigPool{}, &MachineConfigPoolList{}, - &osev1.Node{}, - &osev1.NodeList{}, ) metav1.AddToGroupVersion(scheme, GroupVersion) diff --git a/vendor/github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1/types.go b/vendor/github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1/types.go index 5d64a9de3..8522239ed 100644 --- a/vendor/github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1/types.go +++ b/vendor/github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1/types.go @@ -63,12 +63,22 @@ type ControllerConfigSpec struct { // +nullable AdditionalTrustBundle []byte `json:"additionalTrustBundle"` + // imageRegistryBundleUserData is Image Registry Data provided by the user + ImageRegistryBundleUserData []ImageRegistryBundle `json:"imageRegistryBundleUserData"` + + // imageRegistryBundleData is the ImageRegistryData + ImageRegistryBundleData []ImageRegistryBundle `json:"imageRegistryBundleData"` + // TODO: Investigate using a ConfigMapNameReference for the PullSecret and OSImageURL // pullSecret is the default pull secret that needs to be installed // on all machines. 
PullSecret *corev1.ObjectReference `json:"pullSecret,omitempty"` + // internalRegistryPullSecret is the pull secret for the internal registry + // +nullable + InternalRegistryPullSecret []byte `json:"internalRegistryPullSecret"` + // images is map of images that are used by the controller to render templates under ./templates/ Images map[string]string `json:"images"` @@ -89,10 +99,12 @@ type ControllerConfigSpec struct { Proxy *configv1.ProxyStatus `json:"proxy"` // infra holds the infrastructure details + // +kubebuilder:validation:EmbeddedResource // +nullable Infra *configv1.Infrastructure `json:"infra"` // dns holds the cluster dns details + // +kubebuilder:validation:EmbeddedResource // +nullable DNS *configv1.DNS `json:"dns"` @@ -111,13 +123,19 @@ type ControllerConfigSpec struct { Network *NetworkInfo `json:"network"` } +type ImageRegistryBundle struct { + File string `json:"file"` + Data []byte `json:"data"` +} + // IPFamiliesType indicates whether the cluster network is IPv4-only, IPv6-only, or dual-stack type IPFamiliesType string const ( - IPFamiliesIPv4 IPFamiliesType = "IPv4" - IPFamiliesIPv6 IPFamiliesType = "IPv6" - IPFamiliesDualStack IPFamiliesType = "DualStack" + IPFamiliesIPv4 IPFamiliesType = "IPv4" + IPFamiliesIPv6 IPFamiliesType = "IPv6" + IPFamiliesDualStack IPFamiliesType = "DualStack" + IPFamiliesDualStackIPv6Primary IPFamiliesType = "DualStackIPv6Primary" ) // Network contains network related configuration @@ -136,6 +154,22 @@ type ControllerConfigStatus struct { // conditions represents the latest available observations of current state. // +optional Conditions []ControllerConfigStatusCondition `json:"conditions"` + + // controllerCertificates represents the latest available observations of the automatically rotating certificates in the MCO. + // +optional + ControllerCertificates []ControllerCertificate `json:"controllerCertificates"` +} + +// ControllerCertificate contains info about a specific cert. +type ControllerCertificate struct { + // subject is the cert subject + Subject string `json:"subject"` + + // signer is the cert Issuer + Signer string `json:"signer"` + + // bundleFile is the larger bundle a cert comes from + BundleFile string `json:"bundleFile"` } // ControllerConfigStatusCondition contains condition information for ControllerConfigStatus @@ -300,6 +334,15 @@ type MachineConfigPoolStatus struct { // conditions represents the latest available observations of current state. 
 	// +optional
 	Conditions []MachineConfigPoolCondition `json:"conditions"`
+
+	// certExpirys keeps track of important certificate expiration data
+	CertExpirys []CertExpiry `json:"certExpirys"`
+}
+
+// CertExpiry contains the bundle name and the expiry date
+type CertExpiry struct {
+	Bundle  string `json:"bundle"`
+	Subject string `json:"subject"`
 }
 
 // MachineConfigPoolStatusConfiguration stores the current configuration for the pool, and
@@ -355,6 +398,14 @@ const (
 	// MachineConfigPoolDegraded is the overall status of the pool based, today, on whether we fail with NodeDegraded or RenderDegraded
 	MachineConfigPoolDegraded MachineConfigPoolConditionType = "Degraded"
+
+	MachineConfigPoolBuildPending MachineConfigPoolConditionType = "BuildPending"
+
+	MachineConfigPoolBuilding MachineConfigPoolConditionType = "Building"
+
+	MachineConfigPoolBuildSuccess MachineConfigPoolConditionType = "BuildSuccess"
+
+	MachineConfigPoolBuildFailed MachineConfigPoolConditionType = "BuildFailed"
 )
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
diff --git a/vendor/github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1/zz_generated.deepcopy.go b/vendor/github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1/zz_generated.deepcopy.go
index 1e3417744..3adfe85b3 100644
--- a/vendor/github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1/zz_generated.deepcopy.go
+++ b/vendor/github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1/zz_generated.deepcopy.go
@@ -259,6 +259,20 @@ func (in *ControllerConfigSpec) DeepCopyInto(out *ControllerConfigSpec) {
 			(*out)[key] = val
 		}
 	}
+	if in.ImageRegistryBundleData != nil {
+		in, out := &in.ImageRegistryBundleData, &out.ImageRegistryBundleData
+		*out = make([]ImageRegistryBundle, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.ImageRegistryBundleUserData != nil {
+		in, out := &in.ImageRegistryBundleUserData, &out.ImageRegistryBundleUserData
+		*out = make([]ImageRegistryBundle, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
 	if in.Proxy != nil {
 		in, out := &in.Proxy, &out.Proxy
 		*out = new(configv1.ProxyStatus)
@@ -292,6 +306,12 @@ func (in *ControllerConfigSpec) DeepCopy() *ControllerConfigSpec {
 	return out
 }
 
+func (in *ControllerCertificate) DeepCopyInto(out *ControllerCertificate) {
+	*out = *in
+	return
+
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ControllerConfigStatus) DeepCopyInto(out *ControllerConfigStatus) {
 	*out = *in
@@ -302,6 +322,13 @@ func (in *ControllerConfigStatus) DeepCopyInto(out *ControllerConfigStatus) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
+	if in.ControllerCertificates != nil {
+		in, out := &in.ControllerCertificates, &out.ControllerCertificates
+		*out = make([]ControllerCertificate, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
 	return
 }
 
@@ -315,6 +342,14 @@ func (in *ControllerConfigStatus) DeepCopy() *ControllerConfigStatus {
 	return out
 }
 
+
+func (in *ImageRegistryBundle) DeepCopyInto(out *ImageRegistryBundle) {
+	*out = *in
+	return
+
+}
+
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ControllerConfigStatusCondition) DeepCopyInto(out *ControllerConfigStatusCondition) { *out = *in @@ -332,6 +367,17 @@ func (in *ControllerConfigStatusCondition) DeepCopy() *ControllerConfigStatusCon return out } + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerCertificate. +func (in *ControllerCertificate) DeepCopy() *ControllerCertificate { + if in == nil { + return nil + } + out := new(ControllerCertificate) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *KubeletConfig) DeepCopyInto(out *KubeletConfig) { *out = *in @@ -554,6 +600,23 @@ func (in *MachineConfigPool) DeepCopy() *MachineConfigPool { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertExpiry) DeepCopyInto(out *CertExpiry) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertExpiry. +func (in *CertExpiry) DeepCopy() *CertExpiry { + if in == nil { + return nil + } + out := new(CertExpiry) + in.DeepCopyInto(out) + return out +} + + // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. func (in *MachineConfigPool) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { diff --git a/vendor/github.com/openshift/machine-config-operator/pkg/controller/common/constants.go b/vendor/github.com/openshift/machine-config-operator/pkg/controller/common/constants.go index 12177c361..17c78ad4d 100644 --- a/vendor/github.com/openshift/machine-config-operator/pkg/controller/common/constants.go +++ b/vendor/github.com/openshift/machine-config-operator/pkg/controller/common/constants.go @@ -39,6 +39,23 @@ const ( // MachineConfigPoolMaster is the MachineConfigPool name given to the master MachineConfigPoolMaster = "master" + // MachineConfigPoolWorker is the MachineConfigPool name given to the worker MachineConfigPoolWorker = "worker" + + // LayeringEnabledPoolLabel is the label that enables the "layered" workflow path for a pool. + LayeringEnabledPoolLabel = "machineconfiguration.openshift.io/layering-enabled" + + // ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey is the annotation that signifies which rendered config + // TODO(zzlotnik): Determine if we should use this still. + ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey = "machineconfiguration.openshift.io/newestImageEquivalentConfig" + + OSImageBuildPodLabel = "machineconfiguration.openshift.io/buildPod" + + // InternalMCOIgnitionVersion is the ignition version that the MCO converts everything to internally. The intent here is that + // we should be able to update this constant when we bump the internal ignition version instead of having to hunt down all of + // the version references and figure out "was this supposed to be explicitly 3.4.0 or just the default version which happens + // to be 3.4.0 currently". Ideally if you find an explicit "3.4.0", it's supposed to be "3.4.0" version. If it's this constant, + // it's supposed to be the internal default version. 
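	//
	// Sketched intent (illustrative, not in the vendored file): callers build configs
	// against the constant instead of a hard-coded version, e.g.
	//
	//	cfg := ign3types.Config{Ignition: ign3types.Ignition{Version: InternalMCOIgnitionVersion}}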
+ InternalMCOIgnitionVersion = "3.4.0" ) diff --git a/vendor/github.com/openshift/machine-config-operator/pkg/controller/common/controller_context.go b/vendor/github.com/openshift/machine-config-operator/pkg/controller/common/controller_context.go index d08d78a0e..06932c92c 100644 --- a/vendor/github.com/openshift/machine-config-operator/pkg/controller/common/controller_context.go +++ b/vendor/github.com/openshift/machine-config-operator/pkg/controller/common/controller_context.go @@ -1,21 +1,25 @@ package common import ( + "context" "math/rand" "time" - "github.com/golang/glog" configinformers "github.com/openshift/client-go/config/informers/externalversions" operatorinformers "github.com/openshift/client-go/operator/informers/externalversions" + "github.com/openshift/library-go/pkg/operator/configobserver/featuregates" + "github.com/openshift/library-go/pkg/operator/events" "github.com/openshift/machine-config-operator/internal/clients" daemonconsts "github.com/openshift/machine-config-operator/pkg/daemon/constants" mcfginformers "github.com/openshift/machine-config-operator/pkg/generated/informers/externalversions" + "github.com/openshift/machine-config-operator/pkg/version" apiextinformers "k8s.io/apiextensions-apiserver/pkg/client/informers/externalversions" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/informers" + "k8s.io/klog/v2" ) const ( @@ -52,6 +56,8 @@ type ControllerContext struct { OperatorInformerFactory operatorinformers.SharedInformerFactory KubeMAOSharedInformer informers.SharedInformerFactory + FeatureGateAccess featuregates.FeatureGateAccess + AvailableResources map[schema.GroupVersionResource]bool Stop <-chan struct{} @@ -62,7 +68,7 @@ type ControllerContext struct { } // CreateControllerContext creates the ControllerContext with the ClientBuilder. 
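//
// Call-site sketch for the new context-based signature (names and the namespace
// value are illustrative, not taken from this patch):
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//	cb, err := clients.NewBuilder("") // "" selects the in-cluster config
//	if err != nil {
//		klog.Fatal(err)
//	}
//	ctrlCtx := CreateControllerContext(ctx, cb, "openshift-machine-config-operator")
//	// ctrlCtx.Stop is now ctx.Done(), and ctrlCtx.FeatureGateAccess is populated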
-func CreateControllerContext(cb *clients.Builder, stop <-chan struct{}, targetNamespace string) *ControllerContext { +func CreateControllerContext(ctx context.Context, cb *clients.Builder, targetNamespace string) *ControllerContext { client := cb.MachineConfigClientOrDie("machine-config-shared-informer") kubeClient := cb.KubeClientOrDie("kube-shared-informer") apiExtClient := cb.APIExtClientOrDie("apiext-shared-informer") @@ -87,7 +93,7 @@ func CreateControllerContext(cb *clients.Builder, stop <-chan struct{}, targetNa assignFilterLabels := func(opts *metav1.ListOptions) { labelsMap, err := labels.ConvertSelectorToLabelsMap(opts.LabelSelector) if err != nil { - glog.Warningf("unable to convert selector %q to map: %v", opts.LabelSelector, err) + klog.Warningf("unable to convert selector %q to map: %v", opts.LabelSelector, err) return } opts.LabelSelector = labels.Merge(labelsMap, map[string]string{daemonconsts.OpenShiftOperatorManagedLabel: ""}).String() @@ -97,6 +103,24 @@ func CreateControllerContext(cb *clients.Builder, stop <-chan struct{}, targetNa configSharedInformer := configinformers.NewSharedInformerFactory(configClient, resyncPeriod()()) operatorSharedInformer := operatorinformers.NewSharedInformerFactory(operatorClient, resyncPeriod()()) + desiredVersion := version.ReleaseVersion + missingVersion := "0.0.1-snapshot" + + controllerRef, err := events.GetControllerReferenceForCurrentPod(ctx, kubeClient, targetNamespace, nil) + if err != nil { + klog.Warningf("unable to get owner reference (falling back to namespace): %v", err) + } + + recorder := events.NewKubeRecorder(kubeClient.CoreV1().Events(targetNamespace), "cloud-controller-manager-operator", controllerRef) + + // By default, this will exit(0) the process if the featuregates ever change to a different set of values. 
+ featureGateAccessor := featuregates.NewFeatureGateAccess( + desiredVersion, missingVersion, + configSharedInformer.Config().V1().ClusterVersions(), configSharedInformer.Config().V1().FeatureGates(), + recorder, + ) + go featureGateAccessor.Run(ctx) + return &ControllerContext{ ClientBuilder: cb, NamespacedInformerFactory: sharedNamespacedInformers, @@ -108,9 +132,10 @@ func CreateControllerContext(cb *clients.Builder, stop <-chan struct{}, targetNa APIExtInformerFactory: apiExtSharedInformer, ConfigInformerFactory: configSharedInformer, OperatorInformerFactory: operatorSharedInformer, - Stop: stop, + Stop: ctx.Done(), InformersStarted: make(chan struct{}), ResyncPeriod: resyncPeriod(), KubeMAOSharedInformer: kubeMAOSharedInformer, + FeatureGateAccess: featureGateAccessor, } } diff --git a/vendor/github.com/openshift/machine-config-operator/pkg/controller/common/helpers.go b/vendor/github.com/openshift/machine-config-operator/pkg/controller/common/helpers.go index b4adada81..f0b04e308 100644 --- a/vendor/github.com/openshift/machine-config-operator/pkg/controller/common/helpers.go +++ b/vendor/github.com/openshift/machine-config-operator/pkg/controller/common/helpers.go @@ -5,13 +5,11 @@ import ( "bytes" "compress/gzip" "context" - "crypto/x509" "encoding/base64" - "encoding/pem" "errors" "fmt" "io" - "io/ioutil" + "io/fs" "net/url" "os" "reflect" @@ -24,29 +22,38 @@ import ( "github.com/coreos/ign-converter/translate/v23tov30" "github.com/coreos/ign-converter/translate/v32tov22" "github.com/coreos/ign-converter/translate/v32tov31" + "github.com/coreos/ign-converter/translate/v33tov32" + "github.com/coreos/ign-converter/translate/v34tov33" ign2error "github.com/coreos/ignition/config/shared/errors" ign2 "github.com/coreos/ignition/config/v2_2" ign2types "github.com/coreos/ignition/config/v2_2/types" ign2_3 "github.com/coreos/ignition/config/v2_3" validate2 "github.com/coreos/ignition/config/validate" ign3error "github.com/coreos/ignition/v2/config/shared/errors" - ign3_0 "github.com/coreos/ignition/v2/config/v3_0" - ign3_1 "github.com/coreos/ignition/v2/config/v3_1" translate3_1 "github.com/coreos/ignition/v2/config/v3_1/translate" ign3_1types "github.com/coreos/ignition/v2/config/v3_1/types" - ign3 "github.com/coreos/ignition/v2/config/v3_2" - translate3 "github.com/coreos/ignition/v2/config/v3_2/translate" - ign3types "github.com/coreos/ignition/v2/config/v3_2/types" + translate3_2 "github.com/coreos/ignition/v2/config/v3_2/translate" + ign3_2types "github.com/coreos/ignition/v2/config/v3_2/types" + translate3_3 "github.com/coreos/ignition/v2/config/v3_3/translate" + ign3_3types "github.com/coreos/ignition/v2/config/v3_3/types" + + ign3 "github.com/coreos/ignition/v2/config/v3_4" + ign3_4 "github.com/coreos/ignition/v2/config/v3_4" + translate3 "github.com/coreos/ignition/v2/config/v3_4/translate" + ign3types "github.com/coreos/ignition/v2/config/v3_4/types" validate3 "github.com/coreos/ignition/v2/config/validate" "github.com/ghodss/yaml" - "github.com/golang/glog" "github.com/vincent-petithory/dataurl" kerr "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/tools/reference" + "k8s.io/klog/v2" mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" mcfgclientset "github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned" + 
"github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/scheme" ) // Gates whether or not the MCO uses the new format base OS container image by default @@ -57,6 +64,11 @@ func strToPtr(s string) *string { return &s } +// bootToPtr converts the input boolean to a pointer to itself +func boolToPtr(b bool) *bool { + return &b +} + // MergeMachineConfigs combines multiple machineconfig objects into one object. // It sorts all the configs in increasing order of their name. // It uses the Ignition config from first object as base and appends all the rest. @@ -95,6 +107,17 @@ func MergeMachineConfigs(configs []*mcfgv1.MachineConfig, cconfig *mcfgv1.Contro outIgn = ign3.Merge(outIgn, mergedIgn) } } + + // For file entries without a default overwrite, set it to true + // The MCO will always overwrite any files, but Ignition will not, + // Causing a difference in behaviour and failures when scaling new nodes into the cluster. + // This was a default change from ign spec2->spec3 which users don't often specify. + for idx := range outIgn.Storage.Files { + if outIgn.Storage.Files[idx].Overwrite == nil { + outIgn.Storage.Files[idx].Overwrite = boolToPtr(true) + } + } + rawOutIgn, err := json.Marshal(outIgn) if err != nil { return nil, err @@ -118,14 +141,7 @@ func MergeMachineConfigs(configs []*mcfgv1.MachineConfig, cconfig *mcfgv1.Contro kargs := []string{} for _, cfg := range configs { for _, arg := range cfg.Spec.KernelArguments { - var present bool - for _, val := range kargs { - if val == arg { - present = true - break - } - } - if !present { + if !InSlice(arg, kargs) { kargs = append(kargs, arg) } } @@ -225,36 +241,23 @@ func WriteTerminationError(err error) { // Disable gosec here to avoid throwing // G306: Expect WriteFile permissions to be 0600 or less // #nosec - ioutil.WriteFile("/dev/termination-log", []byte(msg), 0o644) - glog.Fatal(msg) + os.WriteFile("/dev/termination-log", []byte(msg), 0o644) + klog.Fatal(msg) } // ConvertRawExtIgnitionToV3 ensures that the Ignition config in // the RawExtension is spec v3.2, or translates to it. 
-func ConvertRawExtIgnitionToV3(inRawExtIgn *runtime.RawExtension) (runtime.RawExtension, error) {
-	// This function is only used by the MCServer so we don't need to consider v3.0
-	_, rptV3, errV3 := ign3.Parse(inRawExtIgn.Raw)
-	if errV3 == nil && !rptV3.IsFatal() {
-		// The rawExt is already on V3.2, no need to translate
-		return *inRawExtIgn, nil
-	}
+func ConvertRawExtIgnitionToV3_4(inRawExtIgn *runtime.RawExtension) (runtime.RawExtension, error) {
 
-	var converted3 ign3types.Config
-	ignCfgV3_1, rptV3_1, errV3_1 := ign3_1.Parse(inRawExtIgn.Raw)
-	if errV3_1 == nil && !rptV3_1.IsFatal() {
-		converted3 = translate3.Translate(ignCfgV3_1)
-	} else {
-		ignCfg, rpt, err := ign2.Parse(inRawExtIgn.Raw)
-		if err != nil || rpt.IsFatal() {
-			return runtime.RawExtension{}, fmt.Errorf("parsing Ignition config spec v2.2 failed with error: %w\nReport: %v", err, rpt)
-		}
-		converted3, err = convertIgnition2to3(ignCfg)
-		if err != nil {
-			return runtime.RawExtension{}, fmt.Errorf("failed to convert config from spec v2.2 to v3.2: %w", err)
-		}
+	// Parse the raw extension to the MCO's current internal ignition version
+	ignCfgV3, err := IgnParseWrapper(inRawExtIgn.Raw)
+	if err != nil {
+		return runtime.RawExtension{}, err
 	}
 
-	outIgnV3, err := json.Marshal(converted3)
+	// TODO(jkyros): we used to only re-marshal this if it was the wrong version, now we're
+	// re-marshaling every time
+	outIgnV3, err := json.Marshal(ignCfgV3)
 	if err != nil {
 		return runtime.RawExtension{}, fmt.Errorf("failed to marshal converted config: %w", err)
 	}
@@ -265,10 +268,77 @@ func ConvertRawExtIgnitionToV3(inRawExtIgn *runtime.RawEx
 	return outRawExt, nil
 }
 
+// ConvertRawExtIgnitionToV3_3 ensures that the Ignition config in
+// the RawExtension is spec v3.3, or translates to it.
+func ConvertRawExtIgnitionToV3_3(inRawExtIgn *runtime.RawExtension) (runtime.RawExtension, error) {
+	rawExt, err := ConvertRawExtIgnitionToV3_4(inRawExtIgn)
+	if err != nil {
+		return runtime.RawExtension{}, err
+	}
+
+	ignCfgV3, rptV3, errV3 := ign3.Parse(rawExt.Raw)
+	if errV3 != nil || rptV3.IsFatal() {
+		return runtime.RawExtension{}, fmt.Errorf("parsing Ignition config failed with error: %w\nReport: %v", errV3, rptV3)
+	}
+
+	// TODO(jkyros): someday we should write a recursive chain-downconverter, but until then,
+	// we're going to do it the hard way
+	ignCfgV33, err := convertIgnition34to33(ignCfgV3)
+	if err != nil {
+		return runtime.RawExtension{}, err
+	}
+
+	outIgnV33, err := json.Marshal(ignCfgV33)
+	if err != nil {
+		return runtime.RawExtension{}, fmt.Errorf("failed to marshal converted config: %w", err)
+	}
+
+	outRawExt := runtime.RawExtension{}
+	outRawExt.Raw = outIgnV33
+
+	return outRawExt, nil
+}
+
+// ConvertRawExtIgnitionToV3_2 ensures that the Ignition config in
+// the RawExtension is spec v3.2, or translates to it.
+func ConvertRawExtIgnitionToV3_2(inRawExtIgn *runtime.RawExtension) (runtime.RawExtension, error) { + rawExt, err := ConvertRawExtIgnitionToV3_4(inRawExtIgn) + if err != nil { + return runtime.RawExtension{}, err + } + + ignCfgV3, rptV3, errV3 := ign3.Parse(rawExt.Raw) + if errV3 != nil || rptV3.IsFatal() { + return runtime.RawExtension{}, fmt.Errorf("parsing Ignition config failed with error: %w\nReport: %v", errV3, rptV3) + } + + // TODO(jkyros): someday we should write a recursive chain-downconverter, but until then, + // we're going to do it the hard way + ignCfgV33, err := convertIgnition34to33(ignCfgV3) + if err != nil { + return runtime.RawExtension{}, err + } + + ignCfgV32, err := convertIgnition33to32(ignCfgV33) + if err != nil { + return runtime.RawExtension{}, err + } + + outIgnV32, err := json.Marshal(ignCfgV32) + if err != nil { + return runtime.RawExtension{}, fmt.Errorf("failed to marshal converted config: %w", err) + } + + outRawExt := runtime.RawExtension{} + outRawExt.Raw = outIgnV32 + + return outRawExt, nil +} + // ConvertRawExtIgnitionToV3_1 ensures that the Ignition config in // the RawExtension is spec v3.1, or translates to it. func ConvertRawExtIgnitionToV3_1(inRawExtIgn *runtime.RawExtension) (runtime.RawExtension, error) { - rawExt, err := ConvertRawExtIgnitionToV3(inRawExtIgn) + rawExt, err := ConvertRawExtIgnitionToV3_4(inRawExtIgn) if err != nil { return runtime.RawExtension{}, err } @@ -278,7 +348,19 @@ func ConvertRawExtIgnitionToV3_1(inRawExtIgn *runtime.RawExtension) (runtime.Raw return runtime.RawExtension{}, fmt.Errorf("parsing Ignition config failed with error: %w\nReport: %v", errV3, rptV3) } - ignCfgV31, err := convertIgnition32to31(ignCfgV3) + // TODO(jkyros): someday we should write a recursive chain-downconverter, but until then, + // we're going to do it the hard way + ignCfgV33, err := convertIgnition34to33(ignCfgV3) + if err != nil { + return runtime.RawExtension{}, err + } + + ignCfgV32, err := convertIgnition33to32(ignCfgV33) + if err != nil { + return runtime.RawExtension{}, err + } + + ignCfgV31, err := convertIgnition32to31(ignCfgV32) if err != nil { return runtime.RawExtension{}, err } @@ -296,13 +378,13 @@ func ConvertRawExtIgnitionToV3_1(inRawExtIgn *runtime.RawExtension) (runtime.Raw // ConvertRawExtIgnitionToV2 ensures that the Ignition config in // the RawExtension is spec v2.2, or translates to it. 
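//
// Down-conversion sketch (illustrative; the call fails if the config uses features
// that spec v2.2 cannot express):
//
//	v2raw, err := ConvertRawExtIgnitionToV2_2(&rawExt)
//	// internally this walks 3.4 -> 3.3 -> 3.2 -> 2.2 through the vendored translators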
-func ConvertRawExtIgnitionToV2(inRawExtIgn *runtime.RawExtension) (runtime.RawExtension, error) {
+func ConvertRawExtIgnitionToV2_2(inRawExtIgn *runtime.RawExtension) (runtime.RawExtension, error) {
 	ignCfg, rpt, err := ign3.Parse(inRawExtIgn.Raw)
 	if err != nil || rpt.IsFatal() {
 		return runtime.RawExtension{}, fmt.Errorf("parsing Ignition config spec v3.2 failed with error: %w\nReport: %v", err, rpt)
 	}
 
-	converted2, err := convertIgnition3to2(ignCfg)
+	converted2, err := convertIgnition34to22(ignCfg)
 	if err != nil {
 		return runtime.RawExtension{}, fmt.Errorf("failed to convert config from spec v3.2 to v2.2: %w", err)
 	}
@@ -319,7 +401,7 @@
 }
 
 // convertIgnition2to3 takes an ignition spec v2.2 config and returns a v3.2 config
-func convertIgnition2to3(ign2config ign2types.Config) (ign3types.Config, error) {
+func convertIgnition22to34(ign2config ign2types.Config) (ign3types.Config, error) {
 	// only support writing to root file system
 	fsMap := map[string]string{
 		"root": "/",
@@ -331,31 +413,65 @@ func convertIgnition2to3(ign2config ign2types.Config) (ign3types.Config, error)
 	if err != nil {
 		return ign3types.Config{}, fmt.Errorf("unable to convert Ignition spec v2 config to v3: %w", err)
 	}
-	// Workaround to get a v3.2 config as output
-	converted3 := translate3.Translate(translate3_1.Translate(ign3_0config))
+	// Workaround to get a v3.4 config as output
+	converted3 := translate3.Translate(translate3_3.Translate(translate3_2.Translate(translate3_1.Translate(ign3_0config))))
 
-	glog.V(4).Infof("Successfully translated Ignition spec v2 config to Ignition spec v3 config: %v", converted3)
+	klog.V(4).Infof("Successfully translated Ignition spec v2 config to Ignition spec v3 config: %v", converted3)
 
 	return converted3, nil
 }
 
 // convertIgnition3to2 takes an ignition spec v3.2 config and returns a v2.2 config
-func convertIgnition3to2(ign3config ign3types.Config) (ign2types.Config, error) {
-	converted2, err := v32tov22.Translate(ign3config)
+func convertIgnition34to22(ign3config ign3types.Config) (ign2types.Config, error) {
+
+	// TODO(jkyros): that recursive down-converter is looking like a better idea all the time
+	converted33, err := convertIgnition34to33(ign3config)
+	if err != nil {
+		return ign2types.Config{}, fmt.Errorf("unable to convert Ignition spec v3 config to v2: %w", err)
+	}
+
+	converted32, err := convertIgnition33to32(converted33)
+	if err != nil {
+		return ign2types.Config{}, fmt.Errorf("unable to convert Ignition spec v3 config to v2: %w", err)
+	}
+
+	converted2, err := v32tov22.Translate(converted32)
 	if err != nil {
 		return ign2types.Config{}, fmt.Errorf("unable to convert Ignition spec v3 config to v2: %w", err)
 	}
 
-	glog.V(4).Infof("Successfully translated Ignition spec v3 config to Ignition spec v2 config: %v", converted2)
+	klog.V(4).Infof("Successfully translated Ignition spec v3 config to Ignition spec v2 config: %v", converted2)
 
 	return converted2, nil
 }
 
+// convertIgnition34to33 takes an ignition spec v3.4 config and returns a v3.3 config
+func convertIgnition34to33(ign3config ign3types.Config) (ign3_3types.Config, error) {
+	converted33, err := v34tov33.Translate(ign3config)
+	if err != nil {
+		return ign3_3types.Config{}, fmt.Errorf("unable to convert Ignition spec v3_4 config to v3_3: %w", err)
+	}
+	klog.V(4).Infof("Successfully translated Ignition spec v3_4 config to Ignition spec v3_3 config: %v", converted33)
+
+	return converted33, nil
+}
+
+// convertIgnition33to32 takes an ignition spec
v3.3 config and returns a v3.2 config
+func convertIgnition33to32(ign3config ign3_3types.Config) (ign3_2types.Config, error) {
+	converted32, err := v33tov32.Translate(ign3config)
+	if err != nil {
+		return ign3_2types.Config{}, fmt.Errorf("unable to convert Ignition spec v3_3 config to v3_2: %w", err)
+	}
+	klog.V(4).Infof("Successfully translated Ignition spec v3_3 config to Ignition spec v3_2 config: %v", converted32)
+
+	return converted32, nil
+}
+
 // convertIgnition32to31 takes an ignition spec v3.2 config and returns a v3.1 config
-func convertIgnition32to31(ign3config ign3types.Config) (ign3_1types.Config, error) {
+func convertIgnition32to31(ign3config ign3_2types.Config) (ign3_1types.Config, error) {
 	converted31, err := v32tov31.Translate(ign3config)
 	if err != nil {
 		return ign3_1types.Config{}, fmt.Errorf("unable to convert Ignition spec v3_2 config to v3_1: %w", err)
 	}
-	glog.V(4).Infof("Successfully translated Ignition spec v3_2 config to Ignition spec v3_1 config: %v", converted31)
+	klog.V(4).Infof("Successfully translated Ignition spec v3_2 config to Ignition spec v3_1 config: %v", converted31)
 
 	return converted31, nil
 }
@@ -479,39 +595,33 @@ func ValidateMachineConfig(cfg mcfgv1.MachineConfigSpec) error {
 // IgnParseWrapper parses rawIgn for both V2 and V3 ignition configs and returns
 // a V2 or V3 Config or an error. This wrapper is necessary since V2 and V3 use different parsers.
 func IgnParseWrapper(rawIgn []byte) (interface{}, error) {
-	ignCfgV3_2, rptV3_2, errV3_2 := ign3.Parse(rawIgn)
-	if errV3_2 == nil && !rptV3_2.IsFatal() {
-		return ignCfgV3_2, nil
-	}
-	if errV3_2.Error() == ign3error.ErrUnknownVersion.Error() {
-		ignCfgV3_1, rptV3_1, errV3_1 := ign3_1.Parse(rawIgn)
-		if errV3_1 == nil && !rptV3_1.IsFatal() {
-			return translate3.Translate(ignCfgV3_1), nil
-		}
-		// unlike spec v2 parsers, v3 parsers aren't chained by default so we need to try parsing as spec v3.0 as well
-		if errV3_1.Error() == ign3error.ErrUnknownVersion.Error() {
-			ignCfgV3_0, rptV3_0, errV3_0 := ign3_0.Parse(rawIgn)
-			if errV3_0 == nil && !rptV3_0.IsFatal() {
-				return translate3.Translate(translate3_1.Translate(ignCfgV3_0)), nil
-			}
+	// ParseCompatibleVersion will parse any config <= N to version N
+	ignCfgV3, rptV3, errV3 := ign3_4.ParseCompatibleVersion(rawIgn)
+	if errV3 == nil && !rptV3.IsFatal() {
+		return ignCfgV3, nil
+	}
 
-			if errV3_0.Error() == ign3error.ErrUnknownVersion.Error() {
-				ignCfgV2, rptV2, errV2 := ign2.Parse(rawIgn)
-				if errV2 == nil && !rptV2.IsFatal() {
-					return ignCfgV2, nil
-				}
+	// ParseCompatibleVersion differentiates between ErrUnknownVersion ("I know what it is and we don't support it") and
+	// ErrInvalidVersion ("I can't parse it to find out what it is"), but our old 3.2 logic didn't, so this is here to make sure
+	// our error message for invalid version is still helpful.
+	if errV3.Error() == ign3error.ErrInvalidVersion.Error() {
+		return ign3types.Config{}, fmt.Errorf("parsing Ignition config failed: invalid version. Supported spec versions: 2.2, 3.0, 3.1, 3.2, 3.3, 3.4")
+	}
 
-				// If the error is still UnknownVersion it's not a 3.2/3.1/3.0 or 2.x config, thus unsupported
-				if errV2.Error() == ign2error.ErrUnknownVersion.Error() {
-					return ign3types.Config{}, fmt.Errorf("parsing Ignition config failed: unknown version. 
Supported spec versions: 2.2, 3.0, 3.1, 3.2") - } - return ign3types.Config{}, fmt.Errorf("parsing Ignition spec v2 failed with error: %w\nReport: %v", errV2, rptV2) - } - return ign3types.Config{}, fmt.Errorf("parsing Ignition config spec v3.0 failed with error: %w\nReport: %v", errV3_0, rptV3_0) + if errV3.Error() == ign3error.ErrUnknownVersion.Error() { + ignCfgV2, rptV2, errV2 := ign2.Parse(rawIgn) + if errV2 == nil && !rptV2.IsFatal() { + return ignCfgV2, nil + } + + // If the error is still UnknownVersion it's not a 3.3/3.2/3.1/3.0 or 2.x config, thus unsupported + if errV2.Error() == ign2error.ErrUnknownVersion.Error() { + return ign3types.Config{}, fmt.Errorf("parsing Ignition config failed: unknown version. Supported spec versions: 2.2, 3.0, 3.1, 3.2, 3.3, 3.4") } - return ign3types.Config{}, fmt.Errorf("parsing Ignition config spec v3.1 failed with error: %w\nReport: %v", errV3_1, rptV3_1) + return ign3types.Config{}, fmt.Errorf("parsing Ignition spec v2 failed with error: %v\nReport: %v", errV2, rptV2) } - return ign3types.Config{}, fmt.Errorf("parsing Ignition config spec v3.2 failed with error: %w\nReport: %v", errV3_2, rptV3_2) + + return ign3types.Config{}, fmt.Errorf("parsing Ignition config spec v3 failed with error: %v\nReport: %v", errV3, rptV3) } // ParseAndConvertConfig parses rawIgn for both V2 and V3 ignition configs and returns @@ -530,7 +640,7 @@ func ParseAndConvertConfig(rawIgn []byte) (ign3types.Config, error) { if err != nil { return ign3types.Config{}, err } - convertedIgnV3, err := convertIgnition2to3(ignconfv2) + convertedIgnV3, err := convertIgnition22to34(ignconfv2) if err != nil { return ign3types.Config{}, fmt.Errorf("failed to convert Ignition config spec v2 to v3: %w", err) } @@ -549,7 +659,7 @@ func ParseAndConvertGzippedConfig(rawIgn []byte) (ign3types.Config, error) { out, err := decodeAndDecompressPayload(bytes.NewReader(rawIgn)) if err == nil { // Our payload was decoded and decompressed, so parse it as Ignition. - glog.V(2).Info("ignition config was base64-decoded and gunzipped successfully") + klog.V(2).Info("ignition config was base64-decoded and gunzipped successfully") return ParseAndConvertConfig(out) } @@ -557,18 +667,18 @@ func ParseAndConvertGzippedConfig(rawIgn []byte) (ign3types.Config, error) { // e.g.: $ gzip -9 ign_config.json var base64Err base64.CorruptInputError if errors.As(err, &base64Err) { - glog.V(2).Info("ignition config was not base64 encoded, trying to gunzip ignition config") + klog.V(2).Info("ignition config was not base64 encoded, trying to gunzip ignition config") out, err = decompressPayload(bytes.NewReader(rawIgn)) if err == nil { // We were able to decompress our payload, so let's try parsing it - glog.V(2).Info("ignition config was gunzipped successfully") + klog.V(2).Info("ignition config was gunzipped successfully") return ParseAndConvertConfig(out) } } // Our Ignition config is not gzipped, so let's try to serialize the raw Ignition directly. 
 	if errors.Is(err, errConfigNotGzipped) {
-		glog.V(2).Info("ignition config was not gzipped")
+		klog.V(2).Info("ignition config was not gzipped")
 		return ParseAndConvertConfig(rawIgn)
 	}
@@ -614,7 +724,7 @@ func decompressPayload(r io.Reader) ([]byte, error) {
 	defer gz.Close()
-	data, err := ioutil.ReadAll(gz)
+	data, err := io.ReadAll(gz)
 	if err != nil {
 		return nil, fmt.Errorf("decompression failed: %w", err)
 	}
@@ -676,7 +786,7 @@ func removeIgnDuplicateFilesUnitsUsers(ignConfig ign2types.Config) (ign2types.Co
 				continue
 			}
 		}
-		glog.V(2).Infof("Found duplicate unit %v, appending dropin section", unitName)
+		klog.V(2).Infof("Found duplicate unit %v, appending dropin section", unitName)
 	}
 	continue
 }
@@ -734,7 +844,8 @@ func TranspileCoreOSConfigToIgn(files, units []string) (*ign3types.Config, error
 		if err != nil {
 			return nil, fmt.Errorf("failed to transpile config to Ignition config %w\nTranslation set: %v", err, tSet)
 		}
-		ign3_2config := translate3.Translate(translate3_1.Translate(ign3_0config))
+		// TODO(jkyros): do we keep just...adding translations forever as we add more versions? :)
+		ign3_2config := translate3.Translate(translate3_3.Translate(translate3_2.Translate(translate3_1.Translate(ign3_0config))))
 		outConfig = ign3.Merge(outConfig, ign3_2config)
 	}
@@ -751,7 +862,7 @@ func TranspileCoreOSConfigToIgn(files, units []string) (*ign3types.Config, error
 		if err != nil {
 			return nil, fmt.Errorf("failed to transpile config to Ignition config %w\nTranslation set: %v", err, tSet)
 		}
-		ign3_2config := translate3.Translate(translate3_1.Translate(ign3_0config))
+		ign3_2config := translate3.Translate(translate3_3.Translate(translate3_2.Translate(translate3_1.Translate(ign3_0config))))
 		outConfig = ign3.Merge(outConfig, ign3_2config)
 	}
@@ -829,7 +940,7 @@ func dedupePasswdUserSSHKeys(passwdUser ign2types.PasswdUs
 	for _, sshKey := range passwdUser.SSHAuthorizedKeys {
 		if _, isKnown := knownSSHKeys[sshKey]; isKnown {
 			// We've seen this key before; warn and move on.
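The dedupe loop in this hunk keeps the first occurrence of each SSH key and only warns on repeats. The same order-preserving, map-backed pattern in isolation (a sketch; the real function operates on ign2types.PasswdUser):

    package main

    import "fmt"

    // dedupe returns keys with duplicates removed, preserving first-seen order.
    func dedupe(keys []string) []string {
        seen := make(map[string]struct{}, len(keys))
        out := make([]string, 0, len(keys))
        for _, k := range keys {
            if _, ok := seen[k]; ok {
                fmt.Printf("duplicate SSH public key found: %s\n", k)
                continue
            }
            seen[k] = struct{}{}
            out = append(out, k)
        }
        return out
    }

    func main() {
        fmt.Println(dedupe([]string{"ssh-ed25519 AAA...", "ssh-rsa BBB...", "ssh-ed25519 AAA..."}))
    }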
- glog.Warningf("duplicate SSH public key found: %s", sshKey) + klog.Warningf("duplicate SSH public key found: %s", sshKey) continue } @@ -863,7 +974,7 @@ func CalculateConfigFileDiffs(oldIgnConfig, newIgnConfig *ign3types.Config) []st _, ok := newFileSet[path] if !ok { // debug: remove - glog.Infof("File diff: %v was deleted", path) + klog.Infof("File diff: %v was deleted", path) diffFileSet = append(diffFileSet, path) } } @@ -873,11 +984,11 @@ func CalculateConfigFileDiffs(oldIgnConfig, newIgnConfig *ign3types.Config) []st oldFile, ok := oldFileSet[path] if !ok { // debug: remove - glog.Infof("File diff: %v was added", path) + klog.Infof("File diff: %v was added", path) diffFileSet = append(diffFileSet, path) } else if !reflect.DeepEqual(oldFile, newFile) { // debug: remove - glog.Infof("File diff: detected change to %v", newFile.Path) + klog.Infof("File diff: detected change to %v", newFile.Path) diffFileSet = append(diffFileSet, path) } } @@ -947,53 +1058,6 @@ func GetIgnitionFileDataByPath(config *ign3types.Config, path string) ([]byte, e return nil, nil } -// GetNewestCertificatesFromPEMBundle breaks a pem-encoded bundle out into its component certificates -func GetCertificatesFromPEMBundle(pemBytes []byte) ([]*x509.Certificate, error) { - var certs []*x509.Certificate - // There can be multiple certificates in the file - for { - // Decode a block to parse - block, rest := pem.Decode(pemBytes) - // Once we get no more blocks, we've read all the certs - if block == nil { - break - } - // Right now we just care about certificates, not keys - if block.Type == "CERTIFICATE" { - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - // This isn't fatal, *this* cert could just be junk, next one could be okay - glog.Warningf("Failed to parse certificate: %v", err.Error()) - } else { - certs = append(certs, cert) - } - } - // Keep reading from where we left off - pemBytes = rest - } - return certs, nil -} - -// GetLongestValidCertificate returns the latest-expiring certificate from a given list of certificates -// whose Subject.CommonName also matches any of the given common-name prefixes -func GetLongestValidCertificate(certificateList []*x509.Certificate, subjectPrefixes []string) *x509.Certificate { - // Sort is smallest-to-largest, so we're putting the cert with the latest expiry date at the top - sort.Slice(certificateList, func(i, j int) bool { - return certificateList[i].NotAfter.After(certificateList[j].NotAfter) - }) - // For each certificate in our list - for _, certificate := range certificateList { - // Check it against our prefixes - for _, prefix := range subjectPrefixes { - // If it matches, this is the latest-expiring one since it's closest to the "top" - if strings.HasPrefix(certificate.Subject.CommonName, prefix) { - return certificate - } - } - } - return nil -} - // GetDefaultBaseImageContainer is kind of a "soft feature gate" for using the "new format" image by default, its behavior // is determined by the "UseNewFormatImageByDefault" boolean func GetDefaultBaseImageContainer(cconfigspec *mcfgv1.ControllerConfigSpec) string { @@ -1036,3 +1100,122 @@ func indent(spaces int, v string) string { pad := strings.Repeat(" ", spaces) return pad + strings.ReplaceAll(v, "\n", "\n"+pad) } + +// ioutil.ReadDir has been deprecated with os.ReadDir. +// ioutil.ReadDir() used to return []fs.FileInfo but os.ReadDir() returns []fs.DirEntry. 
+// Making it a helper function so that we can reuse the conversion of []fs.DirEntry into []fs.FileInfo
+// Implementation to fetch fileInfo is taken from https://pkg.go.dev/io/ioutil#ReadDir
+func ReadDir(path string) ([]fs.FileInfo, error) {
+	entries, err := os.ReadDir(path)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read dir %q: %w", path, err)
+	}
+	infos := make([]fs.FileInfo, 0, len(entries))
+	for _, entry := range entries {
+		info, err := entry.Info()
+		if err != nil {
+			return nil, fmt.Errorf("failed to fetch fileInfo of %q in %q: %w", entry.Name(), path, err)
+		}
+		infos = append(infos, info)
+	}
+	return infos, nil
+}
+
+func NamespacedEventRecorder(delegate record.EventRecorder) record.EventRecorder {
+	return namespacedEventRecorder{delegate: delegate}
+}
+
+type namespacedEventRecorder struct {
+	delegate record.EventRecorder
+}
+
+func ensureEventNamespace(object runtime.Object) runtime.Object {
+	orig, err := reference.GetReference(scheme.Scheme, object)
+	if err != nil {
+		return object
+	}
+	ret := orig.DeepCopy()
+	if ret.Namespace == "" {
+		// the ref must set a namespace to avoid going into default.
+		// cluster operators are cluster-scoped and "" becomes default. Even though the clusteroperator
+		// is not in this namespace, the logical namespace of this operator is the openshift-machine-config-operator.
+		ret.Namespace = MCONamespace
+	}
+
+	return ret
+}
+
+var _ record.EventRecorder = namespacedEventRecorder{}
+
+func (n namespacedEventRecorder) Event(object runtime.Object, eventtype, reason, message string) {
+	n.delegate.Event(ensureEventNamespace(object), eventtype, reason, message)
+}
+
+func (n namespacedEventRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) {
+	n.delegate.Eventf(ensureEventNamespace(object), eventtype, reason, messageFmt, args...)
+}
+
+func (n namespacedEventRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) {
+	n.delegate.AnnotatedEventf(ensureEventNamespace(object), annotations, eventtype, reason, messageFmt, args...)
+}
+
+func IsLayeredPool(pool *mcfgv1.MachineConfigPool) bool {
+	if _, ok := pool.Labels[LayeringEnabledPoolLabel]; ok {
+		return true
+	}
+	return false
+}
+
+// DockerConfigJSON represents ~/.docker/config.json file info
+type DockerConfigJSON struct {
+	Auths DockerConfig `json:"auths"`
+}
+
+// DockerConfig represents the config file used by the docker CLI.
+// This config represents the credentials that should be used
+// when pulling images from specific image repositories.
+type DockerConfig map[string]DockerConfigEntry
+
+// DockerConfigEntry wraps a docker config as an entry
+type DockerConfigEntry struct {
+	Username string `json:"username"`
+	Password string `json:"password"`
+	Email    string `json:"email"`
+	Auth     string `json:"auth"`
+}
+
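These types model ~/.docker/config.json, and the helper that follows folds one dockercfg payload's hosts into a shared auths map. A self-contained sketch of that merge pattern (types trimmed to two fields, registry hosts invented for the example):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type entry struct {
        Username string `json:"username"`
        Auth     string `json:"auth"`
    }

    // mergeDockerConfig unmarshals one dockercfg payload and folds its
    // hosts into auths, mirroring the helper below.
    func mergeDockerConfig(secretRaw []byte, auths map[string]entry) error {
        var cfg map[string]entry
        if err := json.Unmarshal(secretRaw, &cfg); err != nil {
            return fmt.Errorf("unmarshal failure: %w", err)
        }
        for host, e := range cfg {
            auths[host] = e
        }
        return nil
    }

    func main() {
        auths := map[string]entry{}
        a := []byte(`{"registry.example.com":{"username":"alice","auth":"YWxpY2U6cHc="}}`)
        b := []byte(`{"quay.example.com":{"username":"bob","auth":"Ym9iOnB3"}}`)
        for _, s := range [][]byte{a, b} {
            if err := mergeDockerConfig(s, auths); err != nil {
                panic(err)
            }
        }
        out, _ := json.Marshal(struct {
            Auths map[string]entry `json:"auths"`
        }{auths})
        fmt.Println(string(out))
    }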
+// Merges kubernetes.io/dockercfg type secrets into a JSON map.
+// Returns an error on failure to unmarshal the incoming secret.
+func MergeDockerConfigstoJSONMap(secretRaw []byte, auths map[string]DockerConfigEntry) error {
+	var dockerConfig DockerConfig
+	// Unmarshal raw JSON
+	err := json.Unmarshal(secretRaw, &dockerConfig)
+	if err != nil {
+		return fmt.Errorf("unmarshal failure: %w", err)
+	}
+	// Step through the hosts and add them to the JSON map
+	for host := range dockerConfig {
+		auths[host] = dockerConfig[host]
+	}
+	return nil
+}
+
+// Converts a kubernetes.io/dockerconfigjson type secret to a
+// kubernetes.io/dockercfg type secret. Returns an error
+// if the incoming secret is not formatted correctly.
+func ConvertSecretTodockercfg(secretBytes []byte) ([]byte, error) {
+	type newStyleAuth struct {
+		Auths map[string]interface{} `json:"auths,omitempty"`
+	}
+
+	// Unmarshal the new-style secret first
+	newStyleDecoded := &newStyleAuth{}
+	if err := json.Unmarshal(secretBytes, newStyleDecoded); err != nil {
+		return nil, fmt.Errorf("could not decode new-style pull secret: %w", err)
+	}
+
+	// Marshal with old style, which is everything inside the Auths field
+	out, err := json.Marshal(newStyleDecoded.Auths)
+
+	return out, err
+}
diff --git a/vendor/github.com/openshift/machine-config-operator/pkg/controller/common/layered_node_state.go b/vendor/github.com/openshift/machine-config-operator/pkg/controller/common/layered_node_state.go
new file mode 100644
index 000000000..5d730d37e
--- /dev/null
+++ b/vendor/github.com/openshift/machine-config-operator/pkg/controller/common/layered_node_state.go
@@ -0,0 +1,215 @@
+package common
+
+import (
+	"fmt"
+
+	mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1"
+	daemonconsts "github.com/openshift/machine-config-operator/pkg/daemon/constants"
+	corev1 "k8s.io/api/core/v1"
+)
+
+// This is intended to provide a singular way to interrogate node objects to
+// determine if they're in a specific state. A secondary goal is to provide a
+// singular way to mutate node objects for the purposes of updating their
+// current configurations.
+//
+// The eventual goal is to replace all of the node status functions in
+// status.go with this code, then repackage this so that it can be used by any
+// portion of the MCO which needs to interrogate or mutate node state.
+type LayeredNodeState struct {
+	node *corev1.Node
+}
+
+func NewLayeredNodeState(n *corev1.Node) *LayeredNodeState {
+	return &LayeredNodeState{node: n}
+}
+
+// Augments the isNodeDoneAt() check with determining if the current / desired
+// image annotations match the pool's values.
+func (l *LayeredNodeState) IsDoneAt(mcp *mcfgv1.MachineConfigPool) bool {
+	return isNodeDoneAt(l.node, mcp) && l.isDesiredImageEqualToPool(mcp) && l.isCurrentImageEqualToPool(mcp)
+}
+
+// The original behavior of getUnavailableMachines is: getUnavailableMachines
+// returns the set of nodes which are either marked unschedulable, or have an
+// MCD actively working. If the MCD is actively working (or hasn't started)
+// then the node *may* go unschedulable in the future, so we don't want to
+// potentially start another node update exceeding our maxUnavailable. Somewhat
+// the opposite of getReadyNodes().
+//
+// This augments this check by determining if the desired image annotation is
+// equal to what the pool expects.
+func (l *LayeredNodeState) IsUnavailable(mcp *mcfgv1.MachineConfigPool) bool {
+	return isNodeUnavailable(l.node) && l.isDesiredImageEqualToPool(mcp)
+}
+
+// Checks that the desired machineconfig and image annotations equal the ones
+// specified by the pool.
+func (l *LayeredNodeState) IsDesiredEqualToPool(mcp *mcfgv1.MachineConfigPool) bool {
+	return l.isDesiredMachineConfigEqualToPool(mcp) && l.isDesiredImageEqualToPool(mcp)
+}
+
+// Compares the MachineConfig specified by the MachineConfigPool to the one
+// specified by the node's desired MachineConfig annotation.
+func (l *LayeredNodeState) isDesiredMachineConfigEqualToPool(mcp *mcfgv1.MachineConfigPool) bool {
+	return l.node.Annotations[daemonconsts.DesiredMachineConfigAnnotationKey] == mcp.Spec.Configuration.Name
+}
+
+// Determines if the node's desired image is equal to the expected value from
+// the MachineConfigPool.
+func (l *LayeredNodeState) isDesiredImageEqualToPool(mcp *mcfgv1.MachineConfigPool) bool {
+	return l.isImageAnnotationEqualToPool(daemonconsts.DesiredImageAnnotationKey, mcp)
+}
+
+// Determines if the node's current image is equal to the expected value from
+// the MachineConfigPool.
+func (l *LayeredNodeState) isCurrentImageEqualToPool(mcp *mcfgv1.MachineConfigPool) bool {
+	return l.isImageAnnotationEqualToPool(daemonconsts.CurrentImageAnnotationKey, mcp)
+}
+
+// Determines if a node's image annotation is equal to the expected value from
+// the MachineConfigPool. If the pool is layered, this value should equal the
+// OS image value, if the value is available. If the pool is not layered, then
+// any image annotations should not be present on the node.
+func (l *LayeredNodeState) isImageAnnotationEqualToPool(anno string, mcp *mcfgv1.MachineConfigPool) bool {
+	lps := NewLayeredPoolState(mcp)
+
+	val, ok := l.node.Annotations[anno]
+
+	if lps.IsLayered() && lps.HasOSImage() {
+		// If the pool is layered and has an OS image, check that it equals the
+		// node annotations' value.
+		return lps.GetOSImage() == val
+	}
+
+	// If the pool is not layered, this annotation should not exist.
+	return val == "" || !ok
+}
+
+// Sets the desired annotations from the MachineConfigPool, according to the
+// following rules:
+//
+// 1. The desired MachineConfig annotation will always be set to match the one
+// specified in the MachineConfigPool.
+// 2. If the pool is layered and has the OS image available, it will set the
+// desired image annotation.
+// 3. If the pool is not layered and does not have the OS image available, it
+// will remove the desired image annotation.
+//
+// Note: This will create a deep copy of the node object first to avoid
+// mutating any underlying caches.
+func (l *LayeredNodeState) SetDesiredStateFromPool(mcp *mcfgv1.MachineConfigPool) {
+	node := l.Node()
+	if node.Annotations == nil {
+		node.Annotations = map[string]string{}
+	}
+
+	node.Annotations[daemonconsts.DesiredMachineConfigAnnotationKey] = mcp.Spec.Configuration.Name
+
+	lps := NewLayeredPoolState(mcp)
+
+	if lps.IsLayered() && lps.HasOSImage() {
+		node.Annotations[daemonconsts.DesiredImageAnnotationKey] = lps.GetOSImage()
+	} else {
+		delete(node.Annotations, daemonconsts.DesiredImageAnnotationKey)
+	}
+
+	l.node = node
+}
+
+// Returns a deep copy of the underlying node object.
+func (l *LayeredNodeState) Node() *corev1.Node {
+	return l.node.DeepCopy()
+}
+
+// All functions below this line were copy / pasted from
+// pkg/controller/node/status.go. A future cleanup effort will integrate these
+// more seamlessly into the above struct.
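The status helpers below decide whether a node is done by comparing the current and desired config annotations plus the daemon state. A dependency-free sketch of the same checks against a plain annotation map (the two config keys match constants.go; the daemon-state key and its "Done" value are illustrative stand-ins):

    package main

    import "fmt"

    const (
        currentConfig = "machineconfiguration.openshift.io/currentConfig"
        desiredConfig = "machineconfiguration.openshift.io/desiredConfig"
        daemonState   = "machineconfiguration.openshift.io/state" // illustrative key
    )

    // isDoneAt mirrors the shape of isNodeDone/isNodeDoneAt below:
    // current == desired == target, and the daemon reports Done.
    func isDoneAt(annos map[string]string, target string) bool {
        cur, des := annos[currentConfig], annos[desiredConfig]
        return cur != "" && cur == des && cur == target && annos[daemonState] == "Done"
    }

    func main() {
        node := map[string]string{
            currentConfig: "rendered-worker-abc",
            desiredConfig: "rendered-worker-abc",
            daemonState:   "Done",
        }
        fmt.Println(isDoneAt(node, "rendered-worker-abc")) // true
    }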
+
+// isNodeDone returns true if the current == desired and the MCD has marked done.
+func isNodeDone(node *corev1.Node) bool {
+	if node.Annotations == nil {
+		return false
+	}
+	cconfig, ok := node.Annotations[daemonconsts.CurrentMachineConfigAnnotationKey]
+	if !ok || cconfig == "" {
+		return false
+	}
+	dconfig, ok := node.Annotations[daemonconsts.DesiredMachineConfigAnnotationKey]
+	if !ok || dconfig == "" {
+		return false
+	}
+
+	return cconfig == dconfig && isNodeMCDState(node, daemonconsts.MachineConfigDaemonStateDone)
+}
+
+// isNodeDoneAt checks whether a node is fully updated to a targetConfig
+func isNodeDoneAt(node *corev1.Node, pool *mcfgv1.MachineConfigPool) bool {
+	return isNodeDone(node) && node.Annotations[daemonconsts.CurrentMachineConfigAnnotationKey] == pool.Spec.Configuration.Name
+}
+
+// isNodeUnavailable is a helper function for getUnavailableMachines
+// See the docs of getUnavailableMachines for more info
+func isNodeUnavailable(node *corev1.Node) bool {
+	// Unready nodes are unavailable
+	if !isNodeReady(node) {
+		return true
+	}
+	// Ready nodes are not unavailable
+	if isNodeDone(node) {
+		return false
+	}
+	// Now we know the node isn't ready - the current config must not
+	// equal target. We want to further filter down on the MCD state.
+	// If an MCD is in a terminal (failing) state then we can safely retarget it
+	// to a different config. Or to say it another way, a node is unavailable
+	// if the MCD is working, or hasn't started work but the configs differ.
+	return !isNodeMCDFailing(node)
+}
+
+// isNodeMCDState checks the MCD state against the state parameter
+func isNodeMCDState(node *corev1.Node, state string) bool {
+	dstate, ok := node.Annotations[daemonconsts.MachineConfigDaemonStateAnnotationKey]
+	if !ok || dstate == "" {
+		return false
+	}
+
+	return dstate == state
+}
+
+func checkNodeReady(node *corev1.Node) error {
+	for i := range node.Status.Conditions {
+		cond := &node.Status.Conditions[i]
+		// We consider the node for scheduling only when its:
+		// - NodeReady condition status is ConditionTrue,
+		// - NodeDiskPressure condition status is ConditionFalse,
+		// - NodeNetworkUnavailable condition status is ConditionFalse.
+ if cond.Type == corev1.NodeReady && cond.Status != corev1.ConditionTrue { + return fmt.Errorf("node %s is reporting NotReady=%v", node.Name, cond.Status) + } + if cond.Type == corev1.NodeDiskPressure && cond.Status != corev1.ConditionFalse { + return fmt.Errorf("node %s is reporting OutOfDisk=%v", node.Name, cond.Status) + } + if cond.Type == corev1.NodeNetworkUnavailable && cond.Status != corev1.ConditionFalse { + return fmt.Errorf("node %s is reporting NetworkUnavailable=%v", node.Name, cond.Status) + } + } + // Ignore nodes that are marked unschedulable + if node.Spec.Unschedulable { + return fmt.Errorf("node %s is reporting Unschedulable", node.Name) + } + return nil +} + +func isNodeReady(node *corev1.Node) bool { + return checkNodeReady(node) == nil +} + +// isNodeMCDFailing checks if the MCD has unsuccessfully applied an update +func isNodeMCDFailing(node *corev1.Node) bool { + if node.Annotations[daemonconsts.CurrentMachineConfigAnnotationKey] == node.Annotations[daemonconsts.DesiredMachineConfigAnnotationKey] { + return false + } + return isNodeMCDState(node, daemonconsts.MachineConfigDaemonStateDegraded) || + isNodeMCDState(node, daemonconsts.MachineConfigDaemonStateUnreconcilable) +} diff --git a/vendor/github.com/openshift/machine-config-operator/pkg/controller/common/layered_pool_state.go b/vendor/github.com/openshift/machine-config-operator/pkg/controller/common/layered_pool_state.go new file mode 100644 index 000000000..da060386e --- /dev/null +++ b/vendor/github.com/openshift/machine-config-operator/pkg/controller/common/layered_pool_state.go @@ -0,0 +1,69 @@ +package common + +import ( + mcfgv1 "github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1" +) + +// This is intended to provide a singular way to interrogate MachineConfigPool +// objects to determine if they're in a specific state or not. The eventual +// goal is to use this to mutate the MachineConfigPool object to provide a +// single and consistent interface for that purpose. In this current state, we +// do not perform any mutations. +type LayeredPoolState struct { + pool *mcfgv1.MachineConfigPool +} + +func NewLayeredPoolState(pool *mcfgv1.MachineConfigPool) *LayeredPoolState { + return &LayeredPoolState{pool: pool} +} + +// Determines if a MachineConfigPool is layered by looking for the layering +// enabled label. +func (l *LayeredPoolState) IsLayered() bool { + if l.pool == nil { + return false + } + + if l.pool.Labels == nil { + return false + } + + return IsLayeredPool(l.pool) +} + +// Returns the OS image, if one is present. +func (l *LayeredPoolState) GetOSImage() string { + osImage := l.pool.Annotations[ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey] + return osImage +} + +// Determines if a given MachineConfigPool has an available OS image. Returns +// false if the annotation is missing or set to an empty string. +func (l *LayeredPoolState) HasOSImage() bool { + if l.pool.Labels == nil { + return false + } + + val, ok := l.pool.Annotations[ExperimentalNewestLayeredImageEquivalentConfigAnnotationKey] + return ok && val != "" +} + +// Determines if an OS image build is a success. +func (l *LayeredPoolState) IsBuildSuccess() bool { + return mcfgv1.IsMachineConfigPoolConditionTrue(l.pool.Status.Conditions, mcfgv1.MachineConfigPoolBuildSuccess) +} + +// Determines if an OS image build is pending. 
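IsBuildSuccess above, and the remaining build-state checks that follow, all delegate to mcfgv1.IsMachineConfigPoolConditionTrue. A sketch of the linear scan such condition helpers typically perform (types reduced to the two fields the check reads):

    package main

    import "fmt"

    type poolCondition struct {
        Type   string
        Status string // "True", "False", or "Unknown"
    }

    // isPoolConditionTrue finds the condition by type and compares its status,
    // the same scan k8s-style condition helpers share.
    func isPoolConditionTrue(conds []poolCondition, condType string) bool {
        for _, c := range conds {
            if c.Type == condType {
                return c.Status == "True"
            }
        }
        return false
    }

    func main() {
        conds := []poolCondition{
            {Type: "BuildPending", Status: "False"},
            {Type: "Building", Status: "True"},
        }
        fmt.Println(isPoolConditionTrue(conds, "Building")) // true
    }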
+func (l *LayeredPoolState) IsBuildPending() bool { + return mcfgv1.IsMachineConfigPoolConditionTrue(l.pool.Status.Conditions, mcfgv1.MachineConfigPoolBuildPending) +} + +// Determines if an OS image build is in progress. +func (l *LayeredPoolState) IsBuilding() bool { + return mcfgv1.IsMachineConfigPoolConditionTrue(l.pool.Status.Conditions, mcfgv1.MachineConfigPoolBuilding) +} + +// Determines if an OS image build has failed. +func (l *LayeredPoolState) IsBuildFailure() bool { + return mcfgv1.IsMachineConfigPoolConditionTrue(l.pool.Status.Conditions, mcfgv1.MachineConfigPoolBuildFailed) +} diff --git a/vendor/github.com/openshift/machine-config-operator/pkg/controller/common/metrics.go b/vendor/github.com/openshift/machine-config-operator/pkg/controller/common/metrics.go index 8a5fa45a4..7ffe83c33 100644 --- a/vendor/github.com/openshift/machine-config-operator/pkg/controller/common/metrics.go +++ b/vendor/github.com/openshift/machine-config-operator/pkg/controller/common/metrics.go @@ -2,12 +2,14 @@ package common import ( "context" + "crypto/tls" "fmt" "net/http" + "strings" - "github.com/golang/glog" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" + "k8s.io/klog/v2" ) const ( @@ -17,13 +19,6 @@ const ( // MCC Metrics var ( - // MachineConfigControllerPausedPoolKubeletCA logs when a certificate rotation is being held up by pause - MachineConfigControllerPausedPoolKubeletCA = prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Name: "machine_config_controller_paused_pool_kubelet_ca", - Help: "Set to the unix timestamp in utc of the current certificate expiry date if a certificate rotation is pending in specified paused pool", - }, []string{"pool"}) - // OSImageURLOverride tells whether cluster is using default OS image or has been overridden by user OSImageURLOverride = prometheus.NewGaugeVec( prometheus.GaugeOpts{ @@ -32,25 +27,31 @@ var ( }, []string{"pool"}) // MCCDrainErr logs failed drain - MCCDrainErr = prometheus.NewGauge( + MCCDrainErr = prometheus.NewGaugeVec( prometheus.GaugeOpts{ Name: "mcc_drain_err", Help: "logs failed drain", - }) + }, []string{"node"}) + // MCCPoolAlert logs when the pool configuration changes in a way the user should know. 
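This metrics hunk turns MCCDrainErr into a GaugeVec keyed by node, which is why RegisterMCCMetrics now calls Reset() instead of Set(0). A sketch of that per-label lifecycle against the Prometheus client (the metric name is the one in the hunk; the node name is invented):

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
    )

    var drainErr = prometheus.NewGaugeVec(prometheus.GaugeOpts{
        Name: "mcc_drain_err",
        Help: "logs failed drain",
    }, []string{"node"})

    func main() {
        reg := prometheus.NewRegistry()
        reg.MustRegister(drainErr)

        drainErr.WithLabelValues("worker-0").Set(1) // record a failed drain on one node
        drainErr.DeleteLabelValues("worker-0")      // clear that node once the drain succeeds
        drainErr.Reset()                            // or drop every label set at once

        fmt.Println("mcc_drain_err exercised")
    }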
+ MCCPoolAlert = prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "mcc_pool_alert", + Help: "pool status alert", + }, []string{"node"}) ) func RegisterMCCMetrics() error { err := RegisterMetrics([]prometheus.Collector{ - MachineConfigControllerPausedPoolKubeletCA, OSImageURLOverride, MCCDrainErr, + MCCPoolAlert, }) if err != nil { return fmt.Errorf("could not register machine-config-controller metrics: %w", err) } - MCCDrainErr.Set(0) + MCCDrainErr.Reset() return nil } @@ -72,29 +73,77 @@ func StartMetricsListener(addr string, stopCh <-chan struct{}, registerFunc func addr = DefaultBindAddress } - glog.Info("Registering Prometheus metrics") + klog.Info("Registering Prometheus metrics") if err := registerFunc(); err != nil { - glog.Errorf("unable to register metrics: %v", err) + klog.Errorf("unable to register metrics: %v", err) // No sense in continuing starting the listener if this fails return } - glog.Infof("Starting metrics listener on %s", addr) + klog.Infof("Starting metrics listener on %s", addr) mux := http.NewServeMux() mux.Handle("/metrics", promhttp.Handler()) - s := http.Server{Addr: addr, Handler: mux} + s := http.Server{ + TLSConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + NextProtos: []string{"http/1.1"}, + CipherSuites: cipherOrder(), + }, + TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), + Addr: addr, + Handler: mux} go func() { if err := s.ListenAndServe(); err != nil && err != http.ErrServerClosed { - glog.Errorf("metrics listener exited with error: %v", err) + klog.Errorf("metrics listener exited with error: %v", err) } }() <-stopCh if err := s.Shutdown(context.Background()); err != nil { if err != http.ErrServerClosed { - glog.Errorf("error stopping metrics listener: %v", err) + klog.Errorf("error stopping metrics listener: %v", err) } } else { - glog.Infof("Metrics listener successfully stopped") + klog.Infof("Metrics listener successfully stopped") + } +} + +func cipherOrder() []uint16 { + var first []uint16 + var second []uint16 + + allowable := func(c *tls.CipherSuite) bool { + // Disallow block ciphers using straight SHA1 + // See: https://tools.ietf.org/html/rfc7540#appendix-A + if strings.HasSuffix(c.Name, "CBC_SHA") { + return false + } + // 3DES is considered insecure + if strings.Contains(c.Name, "3DES") { + return false + } + return true } + + for _, c := range tls.CipherSuites() { + for _, v := range c.SupportedVersions { + if v == tls.VersionTLS13 { + first = append(first, c.ID) + } + if v == tls.VersionTLS12 && allowable(c) { + inFirst := false + for _, id := range first { + if c.ID == id { + inFirst = true + break + } + } + if !inFirst { + second = append(second, c.ID) + } + } + } + } + + return append(first, second...) 
} diff --git a/vendor/github.com/openshift/machine-config-operator/pkg/daemon/constants/constants.go b/vendor/github.com/openshift/machine-config-operator/pkg/daemon/constants/constants.go index e51f77cff..7f1a42c0b 100644 --- a/vendor/github.com/openshift/machine-config-operator/pkg/daemon/constants/constants.go +++ b/vendor/github.com/openshift/machine-config-operator/pkg/daemon/constants/constants.go @@ -8,6 +8,11 @@ const ( // // XXX + // CurrentImageAnnotationKey is used to get the current OS image pullspec for a machine + CurrentImageAnnotationKey = "machineconfiguration.openshift.io/currentImage" + // DesiredImageAnnotationKey is used to specify the desired OS image pullspec for a machine + DesiredImageAnnotationKey = "machineconfiguration.openshift.io/desiredImage" + // CurrentMachineConfigAnnotationKey is used to fetch current MachineConfig for a machine CurrentMachineConfigAnnotationKey = "machineconfiguration.openshift.io/currentConfig" // DesiredMachineConfigAnnotationKey is used to specify the desired MachineConfig for a machine @@ -27,6 +32,8 @@ const ( ClusterControlPlaneTopologyAnnotationKey = "machineconfiguration.openshift.io/controlPlaneTopology" // OpenShiftOperatorManagedLabel is used to filter out kube objects that don't need to be synced by the MCO OpenShiftOperatorManagedLabel = "openshift.io/operator-managed" + // ControllerConfigResourceVersionKey is used for the certificate writer to indicate the last controllerconfig object it synced upon + ControllerConfigResourceVersionKey = "machineconfiguration.openshift.io/lastSyncedControllerConfigResourceVersion" // GeneratedByVersionAnnotationKey is used to tag the controllerconfig to synchronize the MCO and MCC GeneratedByVersionAnnotationKey = "machineconfiguration.openshift.io/generated-by-version" @@ -59,9 +66,6 @@ const ( // For more information, see https://github.com/openshift/pivot/pull/25/commits/c77788a35d7ee4058d1410e89e6c7937bca89f6c#diff-04c6e90faac2675aa89e2176d2eec7d8R44 EtcPivotFile = "/etc/pivot/image-pullspec" - // HostSelfBinary is the path where we copy our own binary to the host - HostSelfBinary = "/run/bin/machine-config-daemon" - // MachineConfigEncapsulatedPath contains all of the data from a MachineConfig object // except the Spec/Config object; this supports inverting+encapsulating a MachineConfig // object so that Ignition can process it on first boot, and then the MCD can act on @@ -84,4 +88,13 @@ const ( // changes to registries.conf will cause a crio reload and require extra logic about whether to drain ContainerRegistryConfPath = "/etc/containers/registries.conf" + + // SSH Keys for user "core" will only be written at /home/core/.ssh + CoreUserSSHPath = "/home/" + CoreUserName + "/.ssh" + + // SSH keys in RHCOS 8 will be written to /home/core/.ssh/authorized_keys + RHCOS8SSHKeyPath = CoreUserSSHPath + "/authorized_keys" + + // SSH keys in RHCOS 9 / FCOS / SCOS will be written to /home/core/.ssh/authorized_keys.d/ignition + RHCOS9SSHKeyPath = CoreUserSSHPath + "/authorized_keys.d/ignition" ) diff --git a/vendor/github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/clientset.go b/vendor/github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/clientset.go index 987d92004..55308a34a 100644 --- a/vendor/github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/clientset.go +++ b/vendor/github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/clientset.go @@ -17,8 +17,7 @@ type Interface interface { 
MachineconfigurationV1() machineconfigurationv1.MachineconfigurationV1Interface } -// Clientset contains the clients for groups. Each group has exactly one -// version included in a Clientset. +// Clientset contains the clients for groups. type Clientset struct { *discovery.DiscoveryClient machineconfigurationV1 *machineconfigurationv1.MachineconfigurationV1Client diff --git a/vendor/github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/scheme/register.go b/vendor/github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/scheme/register.go index f67fd82d2..b96a438e9 100644 --- a/vendor/github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/scheme/register.go +++ b/vendor/github.com/openshift/machine-config-operator/pkg/generated/clientset/versioned/scheme/register.go @@ -21,14 +21,14 @@ var localSchemeBuilder = runtime.SchemeBuilder{ // AddToScheme adds all types of this clientset into the given scheme. This allows composition // of clientsets, like in: // -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) // -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) // // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types // correctly. diff --git a/vendor/github.com/openshift/machine-config-operator/pkg/generated/informers/externalversions/factory.go b/vendor/github.com/openshift/machine-config-operator/pkg/generated/informers/externalversions/factory.go index 0f35d0e98..4b588a6cc 100644 --- a/vendor/github.com/openshift/machine-config-operator/pkg/generated/informers/externalversions/factory.go +++ b/vendor/github.com/openshift/machine-config-operator/pkg/generated/informers/externalversions/factory.go @@ -31,6 +31,11 @@ type sharedInformerFactory struct { // startedInformers is used for tracking which informers have been started. // This allows Start() to be called multiple times safely. startedInformers map[reflect.Type]bool + // wg tracks how many goroutines were started. + wg sync.WaitGroup + // shuttingDown is true when Shutdown has been called. It may still be running + // because it needs to wait for goroutines. + shuttingDown bool } // WithCustomResyncConfig sets a custom resync period for the specified informer types. @@ -91,20 +96,39 @@ func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResy return factory } -// Start initializes all requested informers. func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { f.lock.Lock() defer f.lock.Unlock() + if f.shuttingDown { + return + } + for informerType, informer := range f.informers { if !f.startedInformers[informerType] { - go informer.Run(stopCh) + f.wg.Add(1) + // We need a new variable in each loop iteration, + // otherwise the goroutine would use the loop variable + // and that keeps changing. 
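The informer := informer rebinding on the next line is the classic loop-variable capture fix for Go versions before 1.22: without the copy, every goroutine launched in the loop would observe the loop variable's final value. A self-contained demonstration of the pattern:

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        var wg sync.WaitGroup
        for _, informer := range []string{"pods", "nodes", "pools"} {
            informer := informer // rebind: each goroutine captures its own copy
            wg.Add(1)
            go func() {
                defer wg.Done()
                // Without the rebind, Go versions before 1.22 would likely
                // print "pools" three times here.
                fmt.Println("running informer:", informer)
            }()
        }
        wg.Wait()
    }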
+ informer := informer + go func() { + defer f.wg.Done() + informer.Run(stopCh) + }() f.startedInformers[informerType] = true } } } -// WaitForCacheSync waits for all started informers' cache were synced. +func (f *sharedInformerFactory) Shutdown() { + f.lock.Lock() + f.shuttingDown = true + f.lock.Unlock() + + // Will return immediately if there is nothing to wait for. + f.wg.Wait() +} + func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { informers := func() map[reflect.Type]cache.SharedIndexInformer { f.lock.Lock() @@ -151,11 +175,58 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal // SharedInformerFactory provides shared informers for resources in all known // API group versions. +// +// It is typically used like this: +// +// ctx, cancel := context.Background() +// defer cancel() +// factory := NewSharedInformerFactory(client, resyncPeriod) +// defer factory.WaitForStop() // Returns immediately if nothing was started. +// genericInformer := factory.ForResource(resource) +// typedInformer := factory.SomeAPIGroup().V1().SomeType() +// factory.Start(ctx.Done()) // Start processing these informers. +// synced := factory.WaitForCacheSync(ctx.Done()) +// for v, ok := range synced { +// if !ok { +// fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v) +// return +// } +// } +// +// // Creating informers can also be created after Start, but then +// // Start must be called again: +// anotherGenericInformer := factory.ForResource(resource) +// factory.Start(ctx.Done()) type SharedInformerFactory interface { internalinterfaces.SharedInformerFactory - ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + + // Start initializes all requested informers. They are handled in goroutines + // which run until the stop channel gets closed. + Start(stopCh <-chan struct{}) + + // Shutdown marks a factory as shutting down. At that point no new + // informers can be started anymore and Start will return without + // doing anything. + // + // In addition, Shutdown blocks until all goroutines have terminated. For that + // to happen, the close channel(s) that they were started with must be closed, + // either before Shutdown gets called or while it is waiting. + // + // Shutdown may be called multiple times, even concurrently. All such calls will + // block until all goroutines have terminated. + Shutdown() + + // WaitForCacheSync blocks until all started informers' caches were synced + // or the stop channel gets closed. WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + // ForResource gives generic access to a shared informer of the matching type. + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + + // InternalInformerFor returns the SharedIndexInformer for obj using an internal + // client. + InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer + Machineconfiguration() machineconfigurationopenshiftio.Interface } diff --git a/vendor/github.com/openshift/machine-config-operator/pkg/version/version.go b/vendor/github.com/openshift/machine-config-operator/pkg/version/version.go new file mode 100644 index 000000000..022258d1f --- /dev/null +++ b/vendor/github.com/openshift/machine-config-operator/pkg/version/version.go @@ -0,0 +1,54 @@ +package version + +import ( + "fmt" + "os" +) + +const ( + releaseVersionEnv = "RELEASE_VERSION" +) + +var ( + // ReleaseVersion is the version of the openshift release. 
+ // This will be injected by the payload build process. + ReleaseVersion = "0.0.1-snapshot" + + // Raw is the string representation of the version. This will be replaced + // with the calculated version at build time. + Raw = "v0.0.0-was-not-built-properly" + + // Hash is the git hash we've built the MCO with + Hash = "was-not-built-properly" + + // String is the human-friendly representation of the version. + String = fmt.Sprintf("MachineConfigOperator %s", Raw) + + // FCOS is a setting to enable Fedora CoreOS-only modifications + FCOS = false + + // SCOS is a setting to enable CentOS Stream CoreOS-only modifications + SCOS = false +) + +// IsFCOS returns true if Fedora CoreOS-only modifications are enabled +func IsFCOS() bool { + return FCOS +} + +// IsSCOS returns true if CentOS Stream CoreOS-only modifications are enabled +func IsSCOS() bool { + return SCOS +} + +func init() { + // TODO: Remove the following env var override to deprecated RELEASE_VERSION. + // This is only here for backwards compatibility with the old build process. + // For now, it will prepopulate the ReleaseVersion with the value of the env var + // prior to flag parsing. This will allow the flag to override the env var. + // In the future, we should remove this and only use the flag. + rv := os.Getenv(releaseVersionEnv) + if rv != "" { + ReleaseVersion = rv + } +} diff --git a/vendor/github.com/openshift/machine-config-operator/pkg/version/version_fcos.go b/vendor/github.com/openshift/machine-config-operator/pkg/version/version_fcos.go new file mode 100644 index 000000000..1c95e4228 --- /dev/null +++ b/vendor/github.com/openshift/machine-config-operator/pkg/version/version_fcos.go @@ -0,0 +1,7 @@ +//go:build fcos + +package version + +func init() { + FCOS = true +} diff --git a/vendor/github.com/openshift/machine-config-operator/pkg/version/version_scos.go b/vendor/github.com/openshift/machine-config-operator/pkg/version/version_scos.go new file mode 100644 index 000000000..103aeac21 --- /dev/null +++ b/vendor/github.com/openshift/machine-config-operator/pkg/version/version_scos.go @@ -0,0 +1,7 @@ +//go:build scos + +package version + +func init() { + SCOS = true +} diff --git a/vendor/github.com/robfig/cron/.gitignore b/vendor/github.com/robfig/cron/.gitignore new file mode 100644 index 000000000..00268614f --- /dev/null +++ b/vendor/github.com/robfig/cron/.gitignore @@ -0,0 +1,22 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe diff --git a/vendor/github.com/robfig/cron/.travis.yml b/vendor/github.com/robfig/cron/.travis.yml new file mode 100644 index 000000000..4f2ee4d97 --- /dev/null +++ b/vendor/github.com/robfig/cron/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/robfig/cron/LICENSE b/vendor/github.com/robfig/cron/LICENSE new file mode 100644 index 000000000..3a0f627ff --- /dev/null +++ b/vendor/github.com/robfig/cron/LICENSE @@ -0,0 +1,21 @@ +Copyright (C) 2012 Rob Figueiredo +All Rights Reserved. 
+ +MIT LICENSE + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/robfig/cron/README.md b/vendor/github.com/robfig/cron/README.md new file mode 100644 index 000000000..ec40c95fc --- /dev/null +++ b/vendor/github.com/robfig/cron/README.md @@ -0,0 +1,6 @@ +[![GoDoc](http://godoc.org/github.com/robfig/cron?status.png)](http://godoc.org/github.com/robfig/cron) +[![Build Status](https://travis-ci.org/robfig/cron.svg?branch=master)](https://travis-ci.org/robfig/cron) + +# cron + +Documentation here: https://godoc.org/github.com/robfig/cron diff --git a/vendor/github.com/robfig/cron/constantdelay.go b/vendor/github.com/robfig/cron/constantdelay.go new file mode 100644 index 000000000..cd6e7b1be --- /dev/null +++ b/vendor/github.com/robfig/cron/constantdelay.go @@ -0,0 +1,27 @@ +package cron + +import "time" + +// ConstantDelaySchedule represents a simple recurring duty cycle, e.g. "Every 5 minutes". +// It does not support jobs more frequent than once a second. +type ConstantDelaySchedule struct { + Delay time.Duration +} + +// Every returns a crontab Schedule that activates once every duration. +// Delays of less than a second are not supported (will round up to 1 second). +// Any fields less than a Second are truncated. +func Every(duration time.Duration) ConstantDelaySchedule { + if duration < time.Second { + duration = time.Second + } + return ConstantDelaySchedule{ + Delay: duration - time.Duration(duration.Nanoseconds())%time.Second, + } +} + +// Next returns the next time this should be run. +// This rounds so that the next activation time will be on the second. +func (schedule ConstantDelaySchedule) Next(t time.Time) time.Time { + return t.Add(schedule.Delay - time.Duration(t.Nanosecond())*time.Nanosecond) +} diff --git a/vendor/github.com/robfig/cron/cron.go b/vendor/github.com/robfig/cron/cron.go new file mode 100644 index 000000000..2318aeb2e --- /dev/null +++ b/vendor/github.com/robfig/cron/cron.go @@ -0,0 +1,259 @@ +package cron + +import ( + "log" + "runtime" + "sort" + "time" +) + +// Cron keeps track of any number of entries, invoking the associated func as +// specified by the schedule. It may be started, stopped, and the entries may +// be inspected while running. +type Cron struct { + entries []*Entry + stop chan struct{} + add chan *Entry + snapshot chan []*Entry + running bool + ErrorLog *log.Logger + location *time.Location +} + +// Job is an interface for submitted cron jobs. +type Job interface { + Run() +} + +// The Schedule describes a job's duty cycle. 
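ConstantDelaySchedule above rounds activations onto whole seconds, and Schedule, defined next, is the one-method contract it satisfies. A standalone check of that rounding arithmetic (the interface is restated locally so the snippet compiles on its own):

    package main

    import (
        "fmt"
        "time"
    )

    // Schedule restates the one-method contract from cron.go so this
    // snippet stands alone.
    type Schedule interface {
        Next(time.Time) time.Time
    }

    // constantDelay mimics ConstantDelaySchedule: fire every delay,
    // rounded so the next activation lands on a whole second.
    type constantDelay struct{ delay time.Duration }

    func (s constantDelay) Next(t time.Time) time.Time {
        return t.Add(s.delay - time.Duration(t.Nanosecond())*time.Nanosecond)
    }

    func main() {
        var s Schedule = constantDelay{delay: 90 * time.Second}
        start := time.Date(2024, 6, 4, 12, 0, 0, 500_000_000, time.UTC)
        fmt.Println(s.Next(start)) // 2024-06-04 12:01:30 +0000 UTC: the half second is dropped
    }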
+type Schedule interface { + // Return the next activation time, later than the given time. + // Next is invoked initially, and then each time the job is run. + Next(time.Time) time.Time +} + +// Entry consists of a schedule and the func to execute on that schedule. +type Entry struct { + // The schedule on which this job should be run. + Schedule Schedule + + // The next time the job will run. This is the zero time if Cron has not been + // started or this entry's schedule is unsatisfiable + Next time.Time + + // The last time this job was run. This is the zero time if the job has never + // been run. + Prev time.Time + + // The Job to run. + Job Job +} + +// byTime is a wrapper for sorting the entry array by time +// (with zero time at the end). +type byTime []*Entry + +func (s byTime) Len() int { return len(s) } +func (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byTime) Less(i, j int) bool { + // Two zero times should return false. + // Otherwise, zero is "greater" than any other time. + // (To sort it at the end of the list.) + if s[i].Next.IsZero() { + return false + } + if s[j].Next.IsZero() { + return true + } + return s[i].Next.Before(s[j].Next) +} + +// New returns a new Cron job runner, in the Local time zone. +func New() *Cron { + return NewWithLocation(time.Now().Location()) +} + +// NewWithLocation returns a new Cron job runner. +func NewWithLocation(location *time.Location) *Cron { + return &Cron{ + entries: nil, + add: make(chan *Entry), + stop: make(chan struct{}), + snapshot: make(chan []*Entry), + running: false, + ErrorLog: nil, + location: location, + } +} + +// A wrapper that turns a func() into a cron.Job +type FuncJob func() + +func (f FuncJob) Run() { f() } + +// AddFunc adds a func to the Cron to be run on the given schedule. +func (c *Cron) AddFunc(spec string, cmd func()) error { + return c.AddJob(spec, FuncJob(cmd)) +} + +// AddJob adds a Job to the Cron to be run on the given schedule. +func (c *Cron) AddJob(spec string, cmd Job) error { + schedule, err := Parse(spec) + if err != nil { + return err + } + c.Schedule(schedule, cmd) + return nil +} + +// Schedule adds a Job to the Cron to be run on the given schedule. +func (c *Cron) Schedule(schedule Schedule, cmd Job) { + entry := &Entry{ + Schedule: schedule, + Job: cmd, + } + if !c.running { + c.entries = append(c.entries, entry) + return + } + + c.add <- entry +} + +// Entries returns a snapshot of the cron entries. +func (c *Cron) Entries() []*Entry { + if c.running { + c.snapshot <- nil + x := <-c.snapshot + return x + } + return c.entrySnapshot() +} + +// Location gets the time zone location +func (c *Cron) Location() *time.Location { + return c.location +} + +// Start the cron scheduler in its own go-routine, or no-op if already started. +func (c *Cron) Start() { + if c.running { + return + } + c.running = true + go c.run() +} + +// Run the cron scheduler, or no-op if already running. +func (c *Cron) Run() { + if c.running { + return + } + c.running = true + c.run() +} + +func (c *Cron) runWithRecovery(j Job) { + defer func() { + if r := recover(); r != nil { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + c.logf("cron: panic running job: %v\n%s", r, buf) + } + }() + j.Run() +} + +// Run the scheduler. this is private just due to the need to synchronize +// access to the 'running' state variable. +func (c *Cron) run() { + // Figure out the next activation times for each entry. 
+ now := c.now() + for _, entry := range c.entries { + entry.Next = entry.Schedule.Next(now) + } + + for { + // Determine the next entry to run. + sort.Sort(byTime(c.entries)) + + var timer *time.Timer + if len(c.entries) == 0 || c.entries[0].Next.IsZero() { + // If there are no entries yet, just sleep - it still handles new entries + // and stop requests. + timer = time.NewTimer(100000 * time.Hour) + } else { + timer = time.NewTimer(c.entries[0].Next.Sub(now)) + } + + for { + select { + case now = <-timer.C: + now = now.In(c.location) + // Run every entry whose next time was less than now + for _, e := range c.entries { + if e.Next.After(now) || e.Next.IsZero() { + break + } + go c.runWithRecovery(e.Job) + e.Prev = e.Next + e.Next = e.Schedule.Next(now) + } + + case newEntry := <-c.add: + timer.Stop() + now = c.now() + newEntry.Next = newEntry.Schedule.Next(now) + c.entries = append(c.entries, newEntry) + + case <-c.snapshot: + c.snapshot <- c.entrySnapshot() + continue + + case <-c.stop: + timer.Stop() + return + } + + break + } + } +} + +// Logs an error to stderr or to the configured error log +func (c *Cron) logf(format string, args ...interface{}) { + if c.ErrorLog != nil { + c.ErrorLog.Printf(format, args...) + } else { + log.Printf(format, args...) + } +} + +// Stop stops the cron scheduler if it is running; otherwise it does nothing. +func (c *Cron) Stop() { + if !c.running { + return + } + c.stop <- struct{}{} + c.running = false +} + +// entrySnapshot returns a copy of the current cron entry list. +func (c *Cron) entrySnapshot() []*Entry { + entries := []*Entry{} + for _, e := range c.entries { + entries = append(entries, &Entry{ + Schedule: e.Schedule, + Next: e.Next, + Prev: e.Prev, + Job: e.Job, + }) + } + return entries +} + +// now returns current time in c location +func (c *Cron) now() time.Time { + return time.Now().In(c.location) +} diff --git a/vendor/github.com/robfig/cron/doc.go b/vendor/github.com/robfig/cron/doc.go new file mode 100644 index 000000000..d02ec2f3b --- /dev/null +++ b/vendor/github.com/robfig/cron/doc.go @@ -0,0 +1,129 @@ +/* +Package cron implements a cron spec parser and job runner. + +Usage + +Callers may register Funcs to be invoked on a given schedule. Cron will run +them in their own goroutines. + + c := cron.New() + c.AddFunc("0 30 * * * *", func() { fmt.Println("Every hour on the half hour") }) + c.AddFunc("@hourly", func() { fmt.Println("Every hour") }) + c.AddFunc("@every 1h30m", func() { fmt.Println("Every hour thirty") }) + c.Start() + .. + // Funcs are invoked in their own goroutine, asynchronously. + ... + // Funcs may also be added to a running Cron + c.AddFunc("@daily", func() { fmt.Println("Every day") }) + .. + // Inspect the cron job entries' next and previous run times. + inspect(c.Entries()) + .. + c.Stop() // Stop the scheduler (does not stop any jobs already running). + +CRON Expression Format + +A cron expression represents a set of times, using 6 space-separated fields. + + Field name | Mandatory? | Allowed values | Allowed special characters + ---------- | ---------- | -------------- | -------------------------- + Seconds | Yes | 0-59 | * / , - + Minutes | Yes | 0-59 | * / , - + Hours | Yes | 0-23 | * / , - + Day of month | Yes | 1-31 | * / , - ? + Month | Yes | 1-12 or JAN-DEC | * / , - + Day of week | Yes | 0-6 or SUN-SAT | * / , - ? + +Note: Month and Day-of-week field values are case insensitive. "SUN", "Sun", +and "sun" are equally accepted. 
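Because these expressions carry a seconds field, specs for this package have six fields rather than the usual five. A quick way to sanity-check an expression is to compute a few activations with the package-level Parse defined in parser.go (times below assume a UTC start):

    package main

    import (
        "fmt"
        "time"

        "github.com/robfig/cron"
    )

    func main() {
        // Six fields because this package adds seconds: fire at second 0
        // of minute 30 of every hour.
        sched, err := cron.Parse("0 30 * * * *")
        if err != nil {
            panic(err)
        }
        t := time.Date(2024, 6, 4, 12, 0, 0, 0, time.UTC)
        for i := 0; i < 3; i++ {
            t = sched.Next(t)
            fmt.Println(t) // 12:30:00, 13:30:00, 14:30:00 on 2024-06-04 UTC
        }
    }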
+
+Special Characters
+
+Asterisk ( * )
+
+The asterisk indicates that the cron expression will match for all values of the
+field; e.g., using an asterisk in the 5th field (month) would indicate every
+month.
+
+Slash ( / )
+
+Slashes are used to describe increments of ranges. For example 3-59/15 in the
+1st field (minutes) would indicate the 3rd minute of the hour and every 15
+minutes thereafter. The form "*\/..." is equivalent to the form "first-last/...",
+that is, an increment over the largest possible range of the field. The form
+"N/..." is accepted as meaning "N-MAX/...", that is, starting at N, use the
+increment until the end of that specific range. It does not wrap around.
+
+Comma ( , )
+
+Commas are used to separate items of a list. For example, using "MON,WED,FRI" in
+the 5th field (day of week) would mean Mondays, Wednesdays and Fridays.
+
+Hyphen ( - )
+
+Hyphens are used to define ranges. For example, 9-17 would indicate every
+hour between 9am and 5pm inclusive.
+
+Question mark ( ? )
+
+Question mark may be used instead of '*' for leaving either day-of-month or
+day-of-week blank.
+
+Predefined schedules
+
+You may use one of several pre-defined schedules in place of a cron expression.
+
+	Entry                  | Description                                | Equivalent To
+	-----                  | -----------                                | -------------
+	@yearly (or @annually) | Run once a year, midnight, Jan. 1st        | 0 0 0 1 1 *
+	@monthly               | Run once a month, midnight, first of month | 0 0 0 1 * *
+	@weekly                | Run once a week, midnight between Sat/Sun  | 0 0 0 * * 0
+	@daily (or @midnight)  | Run once a day, midnight                   | 0 0 0 * * *
+	@hourly                | Run once an hour, beginning of hour        | 0 0 * * * *
+
+Intervals
+
+You may also schedule a job to execute at fixed intervals, starting at the time it's added
+or cron is run. This is supported by formatting the cron spec like this:
+
+	@every <duration>
+
+where "duration" is a string accepted by time.ParseDuration
+(http://golang.org/pkg/time/#ParseDuration).
+
+For example, "@every 1h30m10s" would indicate a schedule that activates after
+1 hour, 30 minutes, 10 seconds, and then every interval after that.
+
+Note: The interval does not take the job runtime into account. For example,
+if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes,
+it will have only 2 minutes of idle time between each run.
+
+Time zones
+
+All interpretation and scheduling is done in the machine's local time zone (as
+provided by the Go time package (http://www.golang.org/pkg/time)).
+
+Be aware that jobs scheduled during daylight-savings leap-ahead transitions will
+not be run!
+
+Thread safety
+
+Since the Cron service runs concurrently with the calling code, some amount of
+care must be taken to ensure proper synchronization.
+
+All cron methods are designed to be correctly synchronized as long as the caller
+ensures that invocations have a clear happens-before ordering between them.
+
+Implementation
+
+Cron entries are stored in an array, sorted by their next activation time. Cron
+sleeps until the next job is due to be run.
+
+Upon waking:
+ - it runs each entry that is active on that second
+ - it calculates the next run times for the jobs that were run
+ - it re-sorts the array of entries by next activation time.
+ - it goes to sleep until the soonest job.
+*/ +package cron diff --git a/vendor/github.com/robfig/cron/parser.go b/vendor/github.com/robfig/cron/parser.go new file mode 100644 index 000000000..a5e83c0a8 --- /dev/null +++ b/vendor/github.com/robfig/cron/parser.go @@ -0,0 +1,380 @@ +package cron + +import ( + "fmt" + "math" + "strconv" + "strings" + "time" +) + +// Configuration options for creating a parser. Most options specify which +// fields should be included, while others enable features. If a field is not +// included the parser will assume a default value. These options do not change +// the order fields are parse in. +type ParseOption int + +const ( + Second ParseOption = 1 << iota // Seconds field, default 0 + Minute // Minutes field, default 0 + Hour // Hours field, default 0 + Dom // Day of month field, default * + Month // Month field, default * + Dow // Day of week field, default * + DowOptional // Optional day of week field, default * + Descriptor // Allow descriptors such as @monthly, @weekly, etc. +) + +var places = []ParseOption{ + Second, + Minute, + Hour, + Dom, + Month, + Dow, +} + +var defaults = []string{ + "0", + "0", + "0", + "*", + "*", + "*", +} + +// A custom Parser that can be configured. +type Parser struct { + options ParseOption + optionals int +} + +// Creates a custom Parser with custom options. +// +// // Standard parser without descriptors +// specParser := NewParser(Minute | Hour | Dom | Month | Dow) +// sched, err := specParser.Parse("0 0 15 */3 *") +// +// // Same as above, just excludes time fields +// subsParser := NewParser(Dom | Month | Dow) +// sched, err := specParser.Parse("15 */3 *") +// +// // Same as above, just makes Dow optional +// subsParser := NewParser(Dom | Month | DowOptional) +// sched, err := specParser.Parse("15 */3") +// +func NewParser(options ParseOption) Parser { + optionals := 0 + if options&DowOptional > 0 { + options |= Dow + optionals++ + } + return Parser{options, optionals} +} + +// Parse returns a new crontab schedule representing the given spec. +// It returns a descriptive error if the spec is not valid. +// It accepts crontab specs and features configured by NewParser. 
+func (p Parser) Parse(spec string) (Schedule, error) { + if len(spec) == 0 { + return nil, fmt.Errorf("Empty spec string") + } + if spec[0] == '@' && p.options&Descriptor > 0 { + return parseDescriptor(spec) + } + + // Figure out how many fields we need + max := 0 + for _, place := range places { + if p.options&place > 0 { + max++ + } + } + min := max - p.optionals + + // Split fields on whitespace + fields := strings.Fields(spec) + + // Validate number of fields + if count := len(fields); count < min || count > max { + if min == max { + return nil, fmt.Errorf("Expected exactly %d fields, found %d: %s", min, count, spec) + } + return nil, fmt.Errorf("Expected %d to %d fields, found %d: %s", min, max, count, spec) + } + + // Fill in missing fields + fields = expandFields(fields, p.options) + + var err error + field := func(field string, r bounds) uint64 { + if err != nil { + return 0 + } + var bits uint64 + bits, err = getField(field, r) + return bits + } + + var ( + second = field(fields[0], seconds) + minute = field(fields[1], minutes) + hour = field(fields[2], hours) + dayofmonth = field(fields[3], dom) + month = field(fields[4], months) + dayofweek = field(fields[5], dow) + ) + if err != nil { + return nil, err + } + + return &SpecSchedule{ + Second: second, + Minute: minute, + Hour: hour, + Dom: dayofmonth, + Month: month, + Dow: dayofweek, + }, nil +} + +func expandFields(fields []string, options ParseOption) []string { + n := 0 + count := len(fields) + expFields := make([]string, len(places)) + copy(expFields, defaults) + for i, place := range places { + if options&place > 0 { + expFields[i] = fields[n] + n++ + } + if n == count { + break + } + } + return expFields +} + +var standardParser = NewParser( + Minute | Hour | Dom | Month | Dow | Descriptor, +) + +// ParseStandard returns a new crontab schedule representing the given standardSpec +// (https://en.wikipedia.org/wiki/Cron). It differs from Parse requiring to always +// pass 5 entries representing: minute, hour, day of month, month and day of week, +// in that order. It returns a descriptive error if the spec is not valid. +// +// It accepts +// - Standard crontab specs, e.g. "* * * * ?" +// - Descriptors, e.g. "@midnight", "@every 1h30m" +func ParseStandard(standardSpec string) (Schedule, error) { + return standardParser.Parse(standardSpec) +} + +var defaultParser = NewParser( + Second | Minute | Hour | Dom | Month | DowOptional | Descriptor, +) + +// Parse returns a new crontab schedule representing the given spec. +// It returns a descriptive error if the spec is not valid. +// +// It accepts +// - Full crontab specs, e.g. "* * * * * ?" +// - Descriptors, e.g. "@midnight", "@every 1h30m" +func Parse(spec string) (Schedule, error) { + return defaultParser.Parse(spec) +} + +// getField returns an Int with the bits set representing all of the times that +// the field represents or error parsing field value. A "field" is a comma-separated +// list of "ranges". +func getField(field string, r bounds) (uint64, error) { + var bits uint64 + ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' }) + for _, expr := range ranges { + bit, err := getRange(expr, r) + if err != nil { + return bits, err + } + bits |= bit + } + return bits, nil +} + +// getRange returns the bits indicated by the given expression: +// number | number "-" number [ "/" number ] +// or error parsing range. 
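getField and getRange, whose body follows, encode each parsed field as a uint64 bit set via getBits. The bit arithmetic in isolation, for the range expression 3-59/15 in the minutes field (rangeBits is an illustrative stand-in for the step loop in getBits):

    package main

    import "fmt"

    // rangeBits sets every bit in [min, max] stepping by step; this is the
    // same loop getBits (later in this file) uses for steps greater than one.
    func rangeBits(min, max, step uint) uint64 {
        var bits uint64
        for i := min; i <= max; i += step {
            bits |= 1 << i
        }
        return bits
    }

    func main() {
        bits := rangeBits(3, 59, 15) // "3-59/15" in the minutes field
        for m := uint(0); m < 60; m++ {
            if bits&(1<<m) != 0 {
                fmt.Println("matches minute", m) // 3, 18, 33, 48
            }
        }
    }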
+func getRange(expr string, r bounds) (uint64, error) { + var ( + start, end, step uint + rangeAndStep = strings.Split(expr, "/") + lowAndHigh = strings.Split(rangeAndStep[0], "-") + singleDigit = len(lowAndHigh) == 1 + err error + ) + + var extra uint64 + if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" { + start = r.min + end = r.max + extra = starBit + } else { + start, err = parseIntOrName(lowAndHigh[0], r.names) + if err != nil { + return 0, err + } + switch len(lowAndHigh) { + case 1: + end = start + case 2: + end, err = parseIntOrName(lowAndHigh[1], r.names) + if err != nil { + return 0, err + } + default: + return 0, fmt.Errorf("Too many hyphens: %s", expr) + } + } + + switch len(rangeAndStep) { + case 1: + step = 1 + case 2: + step, err = mustParseInt(rangeAndStep[1]) + if err != nil { + return 0, err + } + + // Special handling: "N/step" means "N-max/step". + if singleDigit { + end = r.max + } + default: + return 0, fmt.Errorf("Too many slashes: %s", expr) + } + + if start < r.min { + return 0, fmt.Errorf("Beginning of range (%d) below minimum (%d): %s", start, r.min, expr) + } + if end > r.max { + return 0, fmt.Errorf("End of range (%d) above maximum (%d): %s", end, r.max, expr) + } + if start > end { + return 0, fmt.Errorf("Beginning of range (%d) beyond end of range (%d): %s", start, end, expr) + } + if step == 0 { + return 0, fmt.Errorf("Step of range should be a positive number: %s", expr) + } + + return getBits(start, end, step) | extra, nil +} + +// parseIntOrName returns the (possibly-named) integer contained in expr. +func parseIntOrName(expr string, names map[string]uint) (uint, error) { + if names != nil { + if namedInt, ok := names[strings.ToLower(expr)]; ok { + return namedInt, nil + } + } + return mustParseInt(expr) +} + +// mustParseInt parses the given expression as an int or returns an error. +func mustParseInt(expr string) (uint, error) { + num, err := strconv.Atoi(expr) + if err != nil { + return 0, fmt.Errorf("Failed to parse int from %s: %s", expr, err) + } + if num < 0 { + return 0, fmt.Errorf("Negative number (%d) not allowed: %s", num, expr) + } + + return uint(num), nil +} + +// getBits sets all bits in the range [min, max], modulo the given step size. +func getBits(min, max, step uint) uint64 { + var bits uint64 + + // If step is 1, use shifts. + if step == 1 { + return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min) + } + + // Else, use a simple loop. + for i := min; i <= max; i += step { + bits |= 1 << i + } + return bits +} + +// all returns all bits within the given bounds. (plus the star bit) +func all(r bounds) uint64 { + return getBits(r.min, r.max, 1) | starBit +} + +// parseDescriptor returns a predefined schedule for the expression, or error if none matches. 
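+//
+// For example, "@daily" is equivalent to the six-field spec "0 0 0 * * *",
+// and "@every 1h30m" yields a schedule that activates ninety minutes after
+// each activation time.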
+func parseDescriptor(descriptor string) (Schedule, error) { + switch descriptor { + case "@yearly", "@annually": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: 1 << dom.min, + Month: 1 << months.min, + Dow: all(dow), + }, nil + + case "@monthly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: 1 << dom.min, + Month: all(months), + Dow: all(dow), + }, nil + + case "@weekly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: all(dom), + Month: all(months), + Dow: 1 << dow.min, + }, nil + + case "@daily", "@midnight": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: all(dom), + Month: all(months), + Dow: all(dow), + }, nil + + case "@hourly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: all(hours), + Dom: all(dom), + Month: all(months), + Dow: all(dow), + }, nil + } + + const every = "@every " + if strings.HasPrefix(descriptor, every) { + duration, err := time.ParseDuration(descriptor[len(every):]) + if err != nil { + return nil, fmt.Errorf("Failed to parse duration %s: %s", descriptor, err) + } + return Every(duration), nil + } + + return nil, fmt.Errorf("Unrecognized descriptor: %s", descriptor) +} diff --git a/vendor/github.com/robfig/cron/spec.go b/vendor/github.com/robfig/cron/spec.go new file mode 100644 index 000000000..aac9a60b9 --- /dev/null +++ b/vendor/github.com/robfig/cron/spec.go @@ -0,0 +1,158 @@ +package cron + +import "time" + +// SpecSchedule specifies a duty cycle (to the second granularity), based on a +// traditional crontab specification. It is computed initially and stored as bit sets. +type SpecSchedule struct { + Second, Minute, Hour, Dom, Month, Dow uint64 +} + +// bounds provides a range of acceptable values (plus a map of name to value). +type bounds struct { + min, max uint + names map[string]uint +} + +// The bounds for each field. +var ( + seconds = bounds{0, 59, nil} + minutes = bounds{0, 59, nil} + hours = bounds{0, 23, nil} + dom = bounds{1, 31, nil} + months = bounds{1, 12, map[string]uint{ + "jan": 1, + "feb": 2, + "mar": 3, + "apr": 4, + "may": 5, + "jun": 6, + "jul": 7, + "aug": 8, + "sep": 9, + "oct": 10, + "nov": 11, + "dec": 12, + }} + dow = bounds{0, 6, map[string]uint{ + "sun": 0, + "mon": 1, + "tue": 2, + "wed": 3, + "thu": 4, + "fri": 5, + "sat": 6, + }} +) + +const ( + // Set the top bit if a star was included in the expression. + starBit = 1 << 63 +) + +// Next returns the next time this schedule is activated, greater than the given +// time. If no time can be found to satisfy the schedule, return the zero time. +func (s *SpecSchedule) Next(t time.Time) time.Time { + // General approach: + // For Month, Day, Hour, Minute, Second: + // Check if the time value matches. If yes, continue to the next field. + // If the field doesn't match the schedule, then increment the field until it matches. + // While incrementing the field, a wrap-around brings it back to the beginning + // of the field list (since it is necessary to re-verify previous field + // values) + + // Start at the earliest possible time (the upcoming second). + t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond) + + // This flag indicates whether a field has been incremented. + added := false + + // If no time is found within five years, return zero. 
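+	// For example, a spec pinning Dom to 30 and Month to February can never
+	// match, so the search below must eventually give up rather than loop
+	// forever.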
+	yearLimit := t.Year() + 5
+
+WRAP:
+	if t.Year() > yearLimit {
+		return time.Time{}
+	}
+
+	// Find the first applicable month.
+	// If it's this month, then do nothing.
+	for 1<<uint(t.Month())&s.Month == 0 {
+		// If we have to add a month, reset the other parts to 0.
+		if !added {
+			added = true
+			// Otherwise, set the date at the beginning (since the current time is irrelevant).
+			t = time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, t.Location())
+		}
+		t = t.AddDate(0, 1, 0)
+
+		// Wrapped around.
+		if t.Month() == time.January {
+			goto WRAP
+		}
+	}
+
+	// Now get a day in that month.
+	for !dayMatches(s, t) {
+		if !added {
+			added = true
+			t = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location())
+		}
+		t = t.AddDate(0, 0, 1)
+
+		if t.Day() == 1 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Hour())&s.Hour == 0 {
+		if !added {
+			added = true
+			t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, t.Location())
+		}
+		t = t.Add(1 * time.Hour)
+
+		if t.Hour() == 0 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Minute())&s.Minute == 0 {
+		if !added {
+			added = true
+			t = t.Truncate(time.Minute)
+		}
+		t = t.Add(1 * time.Minute)
+
+		if t.Minute() == 0 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Second())&s.Second == 0 {
+		if !added {
+			added = true
+			t = t.Truncate(time.Second)
+		}
+		t = t.Add(1 * time.Second)
+
+		if t.Second() == 0 {
+			goto WRAP
+		}
+	}
+
+	return t
+}
+
+// dayMatches returns true if the schedule's day-of-week and day-of-month
+// restrictions are satisfied by the given time.
+func dayMatches(s *SpecSchedule, t time.Time) bool {
+	var (
+		domMatch bool = 1<<uint(t.Day())&s.Dom > 0
+		dowMatch bool = 1<<uint(t.Weekday())&s.Dow > 0
+	)
+	if s.Dom&starBit > 0 || s.Dow&starBit > 0 {
+		return domMatch && dowMatch
+	}
+	return domMatch || dowMatch
+}
diff --git a/vendor/k8s.io/apiserver/LICENSE b/vendor/k8s.io/apiserver/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/k8s.io/apiserver/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner.
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/apiserver/pkg/authentication/user/doc.go b/vendor/k8s.io/apiserver/pkg/authentication/user/doc.go new file mode 100644 index 000000000..3d87fd72c --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/user/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package user contains utilities for dealing with simple user exchange in the auth +// packages. The user.Info interface defines an interface for exchanging that info. +package user // import "k8s.io/apiserver/pkg/authentication/user" diff --git a/vendor/k8s.io/apiserver/pkg/authentication/user/user.go b/vendor/k8s.io/apiserver/pkg/authentication/user/user.go new file mode 100644 index 000000000..4d6ec0980 --- /dev/null +++ b/vendor/k8s.io/apiserver/pkg/authentication/user/user.go @@ -0,0 +1,84 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package user + +// Info describes a user that has been authenticated to the system. +type Info interface { + // GetName returns the name that uniquely identifies this user among all + // other active users. + GetName() string + // GetUID returns a unique value for a particular user that will change + // if the user is removed from the system and another user is added with + // the same name. 
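+	// For example, a user that is deleted and then recreated with the
+	// same name is expected to get a different UID.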
+ GetUID() string + // GetGroups returns the names of the groups the user is a member of + GetGroups() []string + + // GetExtra can contain any additional information that the authenticator + // thought was interesting. One example would be scopes on a token. + // Keys in this map should be namespaced to the authenticator or + // authenticator/authorizer pair making use of them. + // For instance: "example.org/foo" instead of "foo" + // This is a map[string][]string because it needs to be serializeable into + // a SubjectAccessReviewSpec.authorization.k8s.io for proper authorization + // delegation flows + // In order to faithfully round-trip through an impersonation flow, these keys + // MUST be lowercase. + GetExtra() map[string][]string +} + +// DefaultInfo provides a simple user information exchange object +// for components that implement the UserInfo interface. +type DefaultInfo struct { + Name string + UID string + Groups []string + Extra map[string][]string +} + +func (i *DefaultInfo) GetName() string { + return i.Name +} + +func (i *DefaultInfo) GetUID() string { + return i.UID +} + +func (i *DefaultInfo) GetGroups() []string { + return i.Groups +} + +func (i *DefaultInfo) GetExtra() map[string][]string { + return i.Extra +} + +// well-known user and group names +const ( + SystemPrivilegedGroup = "system:masters" + NodesGroup = "system:nodes" + MonitoringGroup = "system:monitoring" + AllUnauthenticated = "system:unauthenticated" + AllAuthenticated = "system:authenticated" + + Anonymous = "system:anonymous" + APIServerUser = "system:apiserver" + + // core kubernetes process identities + KubeProxy = "system:kube-proxy" + KubeControllerManager = "system:kube-controller-manager" + KubeScheduler = "system:kube-scheduler" +) diff --git a/vendor/k8s.io/component-base/metrics/OWNERS b/vendor/k8s.io/component-base/metrics/OWNERS new file mode 100644 index 000000000..be371a4a0 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/OWNERS @@ -0,0 +1,11 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - sig-instrumentation-approvers + - logicalhan + - RainbowMango +reviewers: + - sig-instrumentation-reviewers + - YoyinZyc +labels: + - sig/instrumentation diff --git a/vendor/k8s.io/component-base/metrics/buckets.go b/vendor/k8s.io/component-base/metrics/buckets.go new file mode 100644 index 000000000..48d3093e0 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/buckets.go @@ -0,0 +1,43 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +// DefBuckets is a wrapper for prometheus.DefBuckets +var DefBuckets = prometheus.DefBuckets + +// LinearBuckets is a wrapper for prometheus.LinearBuckets. +func LinearBuckets(start, width float64, count int) []float64 { + return prometheus.LinearBuckets(start, width, count) +} + +// ExponentialBuckets is a wrapper for prometheus.ExponentialBuckets. 
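+//
+// For example (illustrative), ExponentialBuckets(1, 2, 4) produces
+// []float64{1, 2, 4, 8}.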
+func ExponentialBuckets(start, factor float64, count int) []float64 { + return prometheus.ExponentialBuckets(start, factor, count) +} + +// MergeBuckets merges buckets together +func MergeBuckets(buckets ...[]float64) []float64 { + result := make([]float64, 1) + for _, s := range buckets { + result = append(result, s...) + } + return result +} diff --git a/vendor/k8s.io/component-base/metrics/collector.go b/vendor/k8s.io/component-base/metrics/collector.go new file mode 100644 index 000000000..0718b6e13 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/collector.go @@ -0,0 +1,190 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + + "github.com/blang/semver/v4" + "github.com/prometheus/client_golang/prometheus" +) + +// StableCollector extends the prometheus.Collector interface to allow customization of the +// metric registration process, it's especially intend to be used in scenario of custom collector. +type StableCollector interface { + prometheus.Collector + + // DescribeWithStability sends the super-set of all possible metrics.Desc collected + // by this StableCollector to the provided channel. + DescribeWithStability(chan<- *Desc) + + // CollectWithStability sends each collected metrics.Metric via the provide channel. + CollectWithStability(chan<- Metric) + + // Create will initialize all Desc and it intends to be called by registry. + Create(version *semver.Version, self StableCollector) bool + + // ClearState will clear all the states marked by Create. + ClearState() + + // HiddenMetrics tells the list of hidden metrics with fqName. + HiddenMetrics() []string +} + +// BaseStableCollector which implements almost all methods defined by StableCollector +// is a convenient assistant for custom collectors. +// It is recommended to inherit BaseStableCollector when implementing custom collectors. +type BaseStableCollector struct { + descriptors map[string]*Desc // stores all descriptors by pair, these are collected from DescribeWithStability(). + registerable map[string]*Desc // stores registerable descriptors by pair, is a subset of descriptors. + hidden map[string]*Desc // stores hidden descriptors by pair, is a subset of descriptors. + self StableCollector +} + +// DescribeWithStability sends all descriptors to the provided channel. +// Every custom collector should over-write this method. +func (bsc *BaseStableCollector) DescribeWithStability(ch chan<- *Desc) { + panic(fmt.Errorf("custom collector should over-write DescribeWithStability method")) +} + +// Describe sends all descriptors to the provided channel. +// It intended to be called by prometheus registry. +func (bsc *BaseStableCollector) Describe(ch chan<- *prometheus.Desc) { + for _, d := range bsc.registerable { + ch <- d.toPrometheusDesc() + } +} + +// CollectWithStability sends all metrics to the provided channel. +// Every custom collector should over-write this method. 
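+//
+// A custom collector typically embeds BaseStableCollector and overrides this
+// method together with DescribeWithStability. Illustrative sketch (myCollector
+// and myDesc are placeholders, and NewLazyConstMetric/GaugeValue are assumed
+// helpers from this package):
+//
+//	type myCollector struct {
+//		BaseStableCollector
+//	}
+//
+//	func (c *myCollector) DescribeWithStability(ch chan<- *Desc) {
+//		ch <- myDesc
+//	}
+//
+//	func (c *myCollector) CollectWithStability(ch chan<- Metric) {
+//		ch <- NewLazyConstMetric(myDesc, GaugeValue, 42)
+//	}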
+func (bsc *BaseStableCollector) CollectWithStability(ch chan<- Metric) { + panic(fmt.Errorf("custom collector should over-write CollectWithStability method")) +} + +// Collect is called by the Prometheus registry when collecting metrics. +func (bsc *BaseStableCollector) Collect(ch chan<- prometheus.Metric) { + mch := make(chan Metric) + + go func() { + bsc.self.CollectWithStability(mch) + close(mch) + }() + + for m := range mch { + // nil Metric usually means hidden metrics + if m == nil { + continue + } + + ch <- prometheus.Metric(m) + } +} + +func (bsc *BaseStableCollector) add(d *Desc) { + if len(d.fqName) == 0 { + panic("nameless metrics will be not allowed") + } + + if bsc.descriptors == nil { + bsc.descriptors = make(map[string]*Desc) + } + + if _, exist := bsc.descriptors[d.fqName]; exist { + panic(fmt.Sprintf("duplicate metrics (%s) will be not allowed", d.fqName)) + } + + bsc.descriptors[d.fqName] = d +} + +// Init intends to be called by registry. +func (bsc *BaseStableCollector) init(self StableCollector) { + bsc.self = self + + dch := make(chan *Desc) + + // collect all possible descriptions from custom side + go func() { + bsc.self.DescribeWithStability(dch) + close(dch) + }() + + for d := range dch { + bsc.add(d) + } +} + +func (bsc *BaseStableCollector) trackRegistrableDescriptor(d *Desc) { + if bsc.registerable == nil { + bsc.registerable = make(map[string]*Desc) + } + + bsc.registerable[d.fqName] = d +} + +func (bsc *BaseStableCollector) trackHiddenDescriptor(d *Desc) { + if bsc.hidden == nil { + bsc.hidden = make(map[string]*Desc) + } + + bsc.hidden[d.fqName] = d +} + +// Create intends to be called by registry. +// Create will return true as long as there is one or more metrics not be hidden. +// Otherwise return false, that means the whole collector will be ignored by registry. +func (bsc *BaseStableCollector) Create(version *semver.Version, self StableCollector) bool { + bsc.init(self) + + for _, d := range bsc.descriptors { + d.create(version) + if d.IsHidden() { + bsc.trackHiddenDescriptor(d) + } else { + bsc.trackRegistrableDescriptor(d) + } + } + + if len(bsc.registerable) > 0 { + return true + } + + return false +} + +// ClearState will clear all the states marked by Create. +// It intends to be used for re-register a hidden metric. +func (bsc *BaseStableCollector) ClearState() { + for _, d := range bsc.descriptors { + d.ClearState() + } + + bsc.descriptors = nil + bsc.registerable = nil + bsc.hidden = nil + bsc.self = nil +} + +// HiddenMetrics tells the list of hidden metrics with fqName. +func (bsc *BaseStableCollector) HiddenMetrics() (fqNames []string) { + for i := range bsc.hidden { + fqNames = append(fqNames, bsc.hidden[i].fqName) + } + return +} + +// Check if our BaseStableCollector implements necessary interface +var _ StableCollector = &BaseStableCollector{} diff --git a/vendor/k8s.io/component-base/metrics/counter.go b/vendor/k8s.io/component-base/metrics/counter.go new file mode 100644 index 000000000..5664a68a9 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/counter.go @@ -0,0 +1,242 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "context" + + "github.com/blang/semver/v4" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" +) + +// Counter is our internal representation for our wrapping struct around prometheus +// counters. Counter implements both kubeCollector and CounterMetric. +type Counter struct { + CounterMetric + *CounterOpts + lazyMetric + selfCollector +} + +// The implementation of the Metric interface is expected by testutil.GetCounterMetricValue. +var _ Metric = &Counter{} + +// NewCounter returns an object which satisfies the kubeCollector and CounterMetric interfaces. +// However, the object returned will not measure anything unless the collector is first +// registered, since the metric is lazily instantiated. +func NewCounter(opts *CounterOpts) *Counter { + opts.StabilityLevel.setDefaults() + + kc := &Counter{ + CounterOpts: opts, + lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel}, + } + kc.setPrometheusCounter(noop) + kc.lazyInit(kc, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)) + return kc +} + +func (c *Counter) Desc() *prometheus.Desc { + return c.metric.Desc() +} + +func (c *Counter) Write(to *dto.Metric) error { + return c.metric.Write(to) +} + +// Reset resets the underlying prometheus Counter to start counting from 0 again +func (c *Counter) Reset() { + if !c.IsCreated() { + return + } + c.setPrometheusCounter(prometheus.NewCounter(c.CounterOpts.toPromCounterOpts())) +} + +// setPrometheusCounter sets the underlying CounterMetric object, i.e. the thing that does the measurement. +func (c *Counter) setPrometheusCounter(counter prometheus.Counter) { + c.CounterMetric = counter + c.initSelfCollection(counter) +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (c *Counter) DeprecatedVersion() *semver.Version { + return parseSemver(c.CounterOpts.DeprecatedVersion) +} + +// initializeMetric invocation creates the actual underlying Counter. Until this method is called +// the underlying counter is a no-op. +func (c *Counter) initializeMetric() { + c.CounterOpts.annotateStabilityLevel() + // this actually creates the underlying prometheus counter. + c.setPrometheusCounter(prometheus.NewCounter(c.CounterOpts.toPromCounterOpts())) +} + +// initializeDeprecatedMetric invocation creates the actual (but deprecated) Counter. Until this method +// is called the underlying counter is a no-op. +func (c *Counter) initializeDeprecatedMetric() { + c.CounterOpts.markDeprecated() + c.initializeMetric() +} + +// WithContext allows the normal Counter metric to pass in context. The context is no-op now. +func (c *Counter) WithContext(ctx context.Context) CounterMetric { + return c.CounterMetric +} + +// CounterVec is the internal representation of our wrapping struct around prometheus +// counterVecs. CounterVec implements both kubeCollector and CounterVecMetric. 
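+//
+// Typical use, as a sketch (the registry, the Help field, and the metric name
+// are assumptions for illustration):
+//
+//	requests := NewCounterVec(&CounterOpts{
+//		Name: "myapp_requests_total",
+//		Help: "Number of requests, partitioned by HTTP verb.",
+//	}, []string{"verb"})
+//	registry.MustRegister(requests) // some KubeRegistry, created elsewhere
+//	requests.WithLabelValues("GET").Inc()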
+type CounterVec struct { + *prometheus.CounterVec + *CounterOpts + lazyMetric + originalLabels []string +} + +var _ kubeCollector = &CounterVec{} + +// TODO: make this true: var _ CounterVecMetric = &CounterVec{} + +// NewCounterVec returns an object which satisfies the kubeCollector and (almost) CounterVecMetric interfaces. +// However, the object returned will not measure anything unless the collector is first +// registered, since the metric is lazily instantiated, and only members extracted after +// registration will actually measure anything. +func NewCounterVec(opts *CounterOpts, labels []string) *CounterVec { + opts.StabilityLevel.setDefaults() + + fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name) + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[fqName]; ok { + opts.LabelValueAllowLists = allowList + } + allowListLock.RUnlock() + + cv := &CounterVec{ + CounterVec: noopCounterVec, + CounterOpts: opts, + originalLabels: labels, + lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel}, + } + cv.lazyInit(cv, fqName) + return cv +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (v *CounterVec) DeprecatedVersion() *semver.Version { + return parseSemver(v.CounterOpts.DeprecatedVersion) + +} + +// initializeMetric invocation creates the actual underlying CounterVec. Until this method is called +// the underlying counterVec is a no-op. +func (v *CounterVec) initializeMetric() { + v.CounterOpts.annotateStabilityLevel() + v.CounterVec = prometheus.NewCounterVec(v.CounterOpts.toPromCounterOpts(), v.originalLabels) +} + +// initializeDeprecatedMetric invocation creates the actual (but deprecated) CounterVec. Until this method is called +// the underlying counterVec is a no-op. +func (v *CounterVec) initializeDeprecatedMetric() { + v.CounterOpts.markDeprecated() + v.initializeMetric() +} + +// Default Prometheus Vec behavior is that member extraction results in creation of a new element +// if one with the unique label values is not found in the underlying stored metricMap. +// This means that if this function is called but the underlying metric is not registered +// (which means it will never be exposed externally nor consumed), the metric will exist in memory +// for perpetuity (i.e. throughout application lifecycle). +// +// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/counter.go#L179-L197 +// +// In contrast, the Vec behavior in this package is that member extraction before registration +// returns a permanent noop object. + +// WithLabelValues returns the Counter for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Counter is created IFF the counterVec +// has been registered to a metrics registry. +func (v *CounterVec) WithLabelValues(lvs ...string) CounterMetric { + if !v.IsCreated() { + return noop // return no-op counter + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs) + } + return v.CounterVec.WithLabelValues(lvs...) +} + +// With returns the Counter for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Counter is created IFF the counterVec has +// been registered to a metrics registry. 
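+//
+// For example (sketch): v.With(map[string]string{"verb": "GET"}).Inc()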
+func (v *CounterVec) With(labels map[string]string) CounterMetric { + if !v.IsCreated() { + return noop // return no-op counter + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainLabelMap(labels) + } + return v.CounterVec.With(labels) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +func (v *CounterVec) Delete(labels map[string]string) bool { + if !v.IsCreated() { + return false // since we haven't created the metric, we haven't deleted a metric with the passed in values + } + return v.CounterVec.Delete(labels) +} + +// Reset deletes all metrics in this vector. +func (v *CounterVec) Reset() { + if !v.IsCreated() { + return + } + + v.CounterVec.Reset() +} + +// WithContext returns wrapped CounterVec with context +func (v *CounterVec) WithContext(ctx context.Context) *CounterVecWithContext { + return &CounterVecWithContext{ + ctx: ctx, + CounterVec: v, + } +} + +// CounterVecWithContext is the wrapper of CounterVec with context. +type CounterVecWithContext struct { + *CounterVec + ctx context.Context +} + +// WithLabelValues is the wrapper of CounterVec.WithLabelValues. +func (vc *CounterVecWithContext) WithLabelValues(lvs ...string) CounterMetric { + return vc.CounterVec.WithLabelValues(lvs...) +} + +// With is the wrapper of CounterVec.With. +func (vc *CounterVecWithContext) With(labels map[string]string) CounterMetric { + return vc.CounterVec.With(labels) +} diff --git a/vendor/k8s.io/component-base/metrics/desc.go b/vendor/k8s.io/component-base/metrics/desc.go new file mode 100644 index 000000000..2ca9cfa7c --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/desc.go @@ -0,0 +1,225 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "sync" + + "github.com/blang/semver/v4" + "github.com/prometheus/client_golang/prometheus" + + "k8s.io/klog/v2" +) + +// Desc is a prometheus.Desc extension. +// +// Use NewDesc to create new Desc instances. +type Desc struct { + // fqName has been built from Namespace, Subsystem, and Name. + fqName string + // help provides some helpful information about this metric. + help string + // constLabels is the label names. Their label values are variable. + constLabels Labels + // variableLabels contains names of labels for which the metric + // maintains variable values. + variableLabels []string + + // promDesc is the descriptor used by every Prometheus Metric. + promDesc *prometheus.Desc + annotatedHelp string + + // stabilityLevel represents the API guarantees for a given defined metric. + stabilityLevel StabilityLevel + // deprecatedVersion represents in which version this metric be deprecated. 
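+	// A value such as "1.22.0" (an illustrative assumption) is parsed as a
+	// semantic version.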
+ deprecatedVersion string + + isDeprecated bool + isHidden bool + isCreated bool + createLock sync.RWMutex + markDeprecationOnce sync.Once + createOnce sync.Once + deprecateOnce sync.Once + hideOnce sync.Once + annotateOnce sync.Once +} + +// NewDesc extends prometheus.NewDesc with stability support. +// +// The stabilityLevel should be valid stability label, such as "metrics.ALPHA" +// and "metrics.STABLE"(Maybe "metrics.BETA" in future). Default value "metrics.ALPHA" +// will be used in case of empty or invalid stability label. +// +// The deprecatedVersion represents in which version this Metric be deprecated. +// The deprecation policy outlined by the control plane metrics stability KEP. +func NewDesc(fqName string, help string, variableLabels []string, constLabels Labels, + stabilityLevel StabilityLevel, deprecatedVersion string) *Desc { + d := &Desc{ + fqName: fqName, + help: help, + annotatedHelp: help, + variableLabels: variableLabels, + constLabels: constLabels, + stabilityLevel: stabilityLevel, + deprecatedVersion: deprecatedVersion, + } + d.stabilityLevel.setDefaults() + + return d +} + +// String formats the Desc as a string. +// The stability metadata maybe annotated in 'HELP' section if called after registry, +// otherwise not. +// e.g. "Desc{fqName: "normal_stable_descriptor", help: "[STABLE] this is a stable descriptor", constLabels: {}, variableLabels: []}" +func (d *Desc) String() string { + if d.isCreated { + return d.promDesc.String() + } + + return prometheus.NewDesc(d.fqName, d.help, d.variableLabels, prometheus.Labels(d.constLabels)).String() +} + +// toPrometheusDesc transform self to prometheus.Desc +func (d *Desc) toPrometheusDesc() *prometheus.Desc { + return d.promDesc +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (d *Desc) DeprecatedVersion() *semver.Version { + return parseSemver(d.deprecatedVersion) + +} + +func (d *Desc) determineDeprecationStatus(version semver.Version) { + selfVersion := d.DeprecatedVersion() + if selfVersion == nil { + return + } + d.markDeprecationOnce.Do(func() { + if selfVersion.LTE(version) { + d.isDeprecated = true + } + if ShouldShowHidden() { + klog.Warningf("Hidden metrics(%s) have been manually overridden, showing this very deprecated metric.", d.fqName) + return + } + if shouldHide(&version, selfVersion) { + // TODO(RainbowMango): Remove this log temporarily. https://github.com/kubernetes/kubernetes/issues/85369 + // klog.Warningf("This metric(%s) has been deprecated for more than one release, hiding.", d.fqName) + d.isHidden = true + } + }) +} + +// IsHidden returns if metric will be hidden +func (d *Desc) IsHidden() bool { + return d.isHidden +} + +// IsDeprecated returns if metric has been deprecated +func (d *Desc) IsDeprecated() bool { + return d.isDeprecated +} + +// IsCreated returns if metric has been created. +func (d *Desc) IsCreated() bool { + d.createLock.RLock() + defer d.createLock.RUnlock() + + return d.isCreated +} + +// create forces the initialization of Desc which has been deferred until +// the point at which this method is invoked. This method will determine whether +// the Desc is deprecated or hidden, no-opting if the Desc should be considered +// hidden. Furthermore, this function no-opts and returns true if Desc is already +// created. 
+func (d *Desc) create(version *semver.Version) bool { + if version != nil { + d.determineDeprecationStatus(*version) + } + + // let's not create if this metric is slated to be hidden + if d.IsHidden() { + return false + } + d.createOnce.Do(func() { + d.createLock.Lock() + defer d.createLock.Unlock() + + d.isCreated = true + if d.IsDeprecated() { + d.initializeDeprecatedDesc() + } else { + d.initialize() + } + }) + return d.IsCreated() +} + +// ClearState will clear all the states marked by Create. +// It intends to be used for re-register a hidden metric. +func (d *Desc) ClearState() { + d.isDeprecated = false + d.isHidden = false + d.isCreated = false + + d.markDeprecationOnce = *new(sync.Once) + d.createOnce = *new(sync.Once) + d.deprecateOnce = *new(sync.Once) + d.hideOnce = *new(sync.Once) + d.annotateOnce = *new(sync.Once) + + d.annotatedHelp = d.help + d.promDesc = nil +} + +func (d *Desc) markDeprecated() { + d.deprecateOnce.Do(func() { + d.annotatedHelp = fmt.Sprintf("(Deprecated since %s) %s", d.deprecatedVersion, d.annotatedHelp) + }) +} + +func (d *Desc) annotateStabilityLevel() { + d.annotateOnce.Do(func() { + d.annotatedHelp = fmt.Sprintf("[%v] %v", d.stabilityLevel, d.annotatedHelp) + }) +} + +func (d *Desc) initialize() { + d.annotateStabilityLevel() + + // this actually creates the underlying prometheus desc. + d.promDesc = prometheus.NewDesc(d.fqName, d.annotatedHelp, d.variableLabels, prometheus.Labels(d.constLabels)) +} + +func (d *Desc) initializeDeprecatedDesc() { + d.markDeprecated() + d.initialize() +} + +// GetRawDesc will returns a new *Desc with original parameters provided to NewDesc(). +// +// It will be useful in testing scenario that the same Desc be registered to different registry. +// 1. Desc `D` is registered to registry 'A' in TestA (Note: `D` maybe created) +// 2. Desc `D` is registered to registry 'B' in TestB (Note: since 'D' has been created once, thus will be ignored by registry 'B') +func (d *Desc) GetRawDesc() *Desc { + return NewDesc(d.fqName, d.help, d.variableLabels, d.constLabels, d.stabilityLevel, d.deprecatedVersion) +} diff --git a/vendor/k8s.io/component-base/metrics/gauge.go b/vendor/k8s.io/component-base/metrics/gauge.go new file mode 100644 index 000000000..89631115a --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/gauge.go @@ -0,0 +1,277 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "context" + + "github.com/blang/semver/v4" + "github.com/prometheus/client_golang/prometheus" + + "k8s.io/component-base/version" +) + +// Gauge is our internal representation for our wrapping struct around prometheus +// gauges. kubeGauge implements both kubeCollector and KubeGauge. +type Gauge struct { + GaugeMetric + *GaugeOpts + lazyMetric + selfCollector +} + +var _ GaugeMetric = &Gauge{} +var _ Registerable = &Gauge{} +var _ kubeCollector = &Gauge{} + +// NewGauge returns an object which satisfies the kubeCollector, Registerable, and Gauge interfaces. 
+// However, the object returned will not measure anything unless the collector is first +// registered, since the metric is lazily instantiated. +func NewGauge(opts *GaugeOpts) *Gauge { + opts.StabilityLevel.setDefaults() + + kc := &Gauge{ + GaugeOpts: opts, + lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel}, + } + kc.setPrometheusGauge(noop) + kc.lazyInit(kc, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)) + return kc +} + +// setPrometheusGauge sets the underlying KubeGauge object, i.e. the thing that does the measurement. +func (g *Gauge) setPrometheusGauge(gauge prometheus.Gauge) { + g.GaugeMetric = gauge + g.initSelfCollection(gauge) +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (g *Gauge) DeprecatedVersion() *semver.Version { + return parseSemver(g.GaugeOpts.DeprecatedVersion) +} + +// initializeMetric invocation creates the actual underlying Gauge. Until this method is called +// the underlying gauge is a no-op. +func (g *Gauge) initializeMetric() { + g.GaugeOpts.annotateStabilityLevel() + // this actually creates the underlying prometheus gauge. + g.setPrometheusGauge(prometheus.NewGauge(g.GaugeOpts.toPromGaugeOpts())) +} + +// initializeDeprecatedMetric invocation creates the actual (but deprecated) Gauge. Until this method +// is called the underlying gauge is a no-op. +func (g *Gauge) initializeDeprecatedMetric() { + g.GaugeOpts.markDeprecated() + g.initializeMetric() +} + +// WithContext allows the normal Gauge metric to pass in context. The context is no-op now. +func (g *Gauge) WithContext(ctx context.Context) GaugeMetric { + return g.GaugeMetric +} + +// GaugeVec is the internal representation of our wrapping struct around prometheus +// gaugeVecs. kubeGaugeVec implements both kubeCollector and KubeGaugeVec. +type GaugeVec struct { + *prometheus.GaugeVec + *GaugeOpts + lazyMetric + originalLabels []string +} + +var _ GaugeVecMetric = &GaugeVec{} +var _ Registerable = &GaugeVec{} +var _ kubeCollector = &GaugeVec{} + +// NewGaugeVec returns an object which satisfies the kubeCollector, Registerable, and GaugeVecMetric interfaces. +// However, the object returned will not measure anything unless the collector is first +// registered, since the metric is lazily instantiated, and only members extracted after +// registration will actually measure anything. +func NewGaugeVec(opts *GaugeOpts, labels []string) *GaugeVec { + opts.StabilityLevel.setDefaults() + + fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name) + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[fqName]; ok { + opts.LabelValueAllowLists = allowList + } + allowListLock.RUnlock() + + cv := &GaugeVec{ + GaugeVec: noopGaugeVec, + GaugeOpts: opts, + originalLabels: labels, + lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel}, + } + cv.lazyInit(cv, fqName) + return cv +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (v *GaugeVec) DeprecatedVersion() *semver.Version { + return parseSemver(v.GaugeOpts.DeprecatedVersion) +} + +// initializeMetric invocation creates the actual underlying GaugeVec. Until this method is called +// the underlying gaugeVec is a no-op. +func (v *GaugeVec) initializeMetric() { + v.GaugeOpts.annotateStabilityLevel() + v.GaugeVec = prometheus.NewGaugeVec(v.GaugeOpts.toPromGaugeOpts(), v.originalLabels) +} + +// initializeDeprecatedMetric invocation creates the actual (but deprecated) GaugeVec. Until this method is called +// the underlying gaugeVec is a no-op. 
+func (v *GaugeVec) initializeDeprecatedMetric() { + v.GaugeOpts.markDeprecated() + v.initializeMetric() +} + +func (v *GaugeVec) WithLabelValuesChecked(lvs ...string) (GaugeMetric, error) { + if !v.IsCreated() { + if v.IsHidden() { + return noop, nil + } + return noop, errNotRegistered // return no-op gauge + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs) + } + elt, err := v.GaugeVec.GetMetricWithLabelValues(lvs...) + return elt, err +} + +// Default Prometheus Vec behavior is that member extraction results in creation of a new element +// if one with the unique label values is not found in the underlying stored metricMap. +// This means that if this function is called but the underlying metric is not registered +// (which means it will never be exposed externally nor consumed), the metric will exist in memory +// for perpetuity (i.e. throughout application lifecycle). +// +// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/gauge.go#L190-L208 +// +// In contrast, the Vec behavior in this package is that member extraction before registration +// returns a permanent noop object. + +// WithLabelValues returns the GaugeMetric for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new GaugeMetric is created IFF the gaugeVec +// has been registered to a metrics registry. +func (v *GaugeVec) WithLabelValues(lvs ...string) GaugeMetric { + ans, err := v.WithLabelValuesChecked(lvs...) + if err == nil || ErrIsNotRegistered(err) { + return ans + } + panic(err) +} + +func (v *GaugeVec) WithChecked(labels map[string]string) (GaugeMetric, error) { + if !v.IsCreated() { + if v.IsHidden() { + return noop, nil + } + return noop, errNotRegistered // return no-op gauge + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainLabelMap(labels) + } + elt, err := v.GaugeVec.GetMetricWith(labels) + return elt, err +} + +// With returns the GaugeMetric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new GaugeMetric is created IFF the gaugeVec has +// been registered to a metrics registry. +func (v *GaugeVec) With(labels map[string]string) GaugeMetric { + ans, err := v.WithChecked(labels) + if err == nil || ErrIsNotRegistered(err) { + return ans + } + panic(err) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +func (v *GaugeVec) Delete(labels map[string]string) bool { + if !v.IsCreated() { + return false // since we haven't created the metric, we haven't deleted a metric with the passed in values + } + return v.GaugeVec.Delete(labels) +} + +// Reset deletes all metrics in this vector. 
+func (v *GaugeVec) Reset() { + if !v.IsCreated() { + return + } + + v.GaugeVec.Reset() +} + +func newGaugeFunc(opts *GaugeOpts, function func() float64, v semver.Version) GaugeFunc { + g := NewGauge(opts) + + if !g.Create(&v) { + return nil + } + + return prometheus.NewGaugeFunc(g.GaugeOpts.toPromGaugeOpts(), function) +} + +// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The +// value reported is determined by calling the given function from within the +// Write method. Take into account that metric collection may happen +// concurrently. If that results in concurrent calls to Write, like in the case +// where a GaugeFunc is directly registered with Prometheus, the provided +// function must be concurrency-safe. +func NewGaugeFunc(opts *GaugeOpts, function func() float64) GaugeFunc { + v := parseVersion(version.Get()) + + return newGaugeFunc(opts, function, v) +} + +// WithContext returns wrapped GaugeVec with context +func (v *GaugeVec) WithContext(ctx context.Context) *GaugeVecWithContext { + return &GaugeVecWithContext{ + ctx: ctx, + GaugeVec: v, + } +} + +func (v *GaugeVec) InterfaceWithContext(ctx context.Context) GaugeVecMetric { + return v.WithContext(ctx) +} + +// GaugeVecWithContext is the wrapper of GaugeVec with context. +type GaugeVecWithContext struct { + *GaugeVec + ctx context.Context +} + +// WithLabelValues is the wrapper of GaugeVec.WithLabelValues. +func (vc *GaugeVecWithContext) WithLabelValues(lvs ...string) GaugeMetric { + return vc.GaugeVec.WithLabelValues(lvs...) +} + +// With is the wrapper of GaugeVec.With. +func (vc *GaugeVecWithContext) With(labels map[string]string) GaugeMetric { + return vc.GaugeVec.With(labels) +} diff --git a/vendor/k8s.io/component-base/metrics/histogram.go b/vendor/k8s.io/component-base/metrics/histogram.go new file mode 100644 index 000000000..e6884f35c --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/histogram.go @@ -0,0 +1,214 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "context" + + "github.com/blang/semver/v4" + "github.com/prometheus/client_golang/prometheus" +) + +// Histogram is our internal representation for our wrapping struct around prometheus +// histograms. Summary implements both kubeCollector and ObserverMetric +type Histogram struct { + ObserverMetric + *HistogramOpts + lazyMetric + selfCollector +} + +// NewHistogram returns an object which is Histogram-like. However, nothing +// will be measured until the histogram is registered somewhere. +func NewHistogram(opts *HistogramOpts) *Histogram { + opts.StabilityLevel.setDefaults() + + h := &Histogram{ + HistogramOpts: opts, + lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel}, + } + h.setPrometheusHistogram(noopMetric{}) + h.lazyInit(h, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)) + return h +} + +// setPrometheusHistogram sets the underlying KubeGauge object, i.e. the thing that does the measurement. 
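+// (The object stored here is the prometheus.Histogram passed in; the
+// "KubeGauge" wording above is carried over from the gauge implementation.)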
+func (h *Histogram) setPrometheusHistogram(histogram prometheus.Histogram) {
+	h.ObserverMetric = histogram
+	h.initSelfCollection(histogram)
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (h *Histogram) DeprecatedVersion() *semver.Version {
+	return parseSemver(h.HistogramOpts.DeprecatedVersion)
+}
+
+// initializeMetric invokes the actual prometheus.Histogram object instantiation
+// and stores a reference to it
+func (h *Histogram) initializeMetric() {
+	h.HistogramOpts.annotateStabilityLevel()
+	// this actually creates the underlying prometheus histogram.
+	h.setPrometheusHistogram(prometheus.NewHistogram(h.HistogramOpts.toPromHistogramOpts()))
+}
+
+// initializeDeprecatedMetric invokes the actual prometheus.Histogram object instantiation
+// but modifies the Help description prior to object instantiation.
+func (h *Histogram) initializeDeprecatedMetric() {
+	h.HistogramOpts.markDeprecated()
+	h.initializeMetric()
+}
+
+// WithContext allows the normal Histogram metric to pass in context. The context is a no-op for now.
+func (h *Histogram) WithContext(ctx context.Context) ObserverMetric {
+	return h.ObserverMetric
+}
+
+// HistogramVec is the internal representation of our wrapping struct around prometheus
+// histogramVecs.
+type HistogramVec struct {
+	*prometheus.HistogramVec
+	*HistogramOpts
+	lazyMetric
+	originalLabels []string
+}
+
+// NewHistogramVec returns an object which satisfies kubeCollector and wraps the
+// prometheus.HistogramVec object. However, the object returned will not measure
+// anything unless the collector is first registered, since the metric is lazily
+// instantiated, and only members extracted after registration will actually
+// measure anything.
+func NewHistogramVec(opts *HistogramOpts, labels []string) *HistogramVec {
+	opts.StabilityLevel.setDefaults()
+
+	fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)
+	allowListLock.RLock()
+	if allowList, ok := labelValueAllowLists[fqName]; ok {
+		opts.LabelValueAllowLists = allowList
+	}
+	allowListLock.RUnlock()
+
+	v := &HistogramVec{
+		HistogramVec:   noopHistogramVec,
+		HistogramOpts:  opts,
+		originalLabels: labels,
+		lazyMetric:     lazyMetric{stabilityLevel: opts.StabilityLevel},
+	}
+	v.lazyInit(v, fqName)
+	return v
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (v *HistogramVec) DeprecatedVersion() *semver.Version {
+	return parseSemver(v.HistogramOpts.DeprecatedVersion)
+}
+
+func (v *HistogramVec) initializeMetric() {
+	v.HistogramOpts.annotateStabilityLevel()
+	v.HistogramVec = prometheus.NewHistogramVec(v.HistogramOpts.toPromHistogramOpts(), v.originalLabels)
+}
+
+func (v *HistogramVec) initializeDeprecatedMetric() {
+	v.HistogramOpts.markDeprecated()
+	v.initializeMetric()
+}
+
+// Default Prometheus Vec behavior is that member extraction results in creation of a new element
+// if one with the unique label values is not found in the underlying stored metricMap.
+// This means that if this function is called but the underlying metric is not registered
+// (which means it will never be exposed externally nor consumed), the metric will exist in memory
+// for perpetuity (i.e. throughout application lifecycle).
+//
+// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/histogram.go#L460-L470
+//
+// In contrast, the Vec behavior in this package is that member extraction before registration
+// returns a permanent noop object.
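The registration-gated behavior described in that note is easiest to see end to end. The following is a minimal, hypothetical sketch, not part of the vendored file; it assumes only the APIs this patch vendors under k8s.io/component-base/metrics (NewHistogramVec, NewKubeRegistry, MustRegister):

```go
package main

import "k8s.io/component-base/metrics"

func main() {
	latency := metrics.NewHistogramVec(
		&metrics.HistogramOpts{
			Name:           "request_latency_seconds",
			Help:           "Request latency, partitioned by verb.",
			Buckets:        []float64{0.01, 0.1, 1},
			StabilityLevel: metrics.ALPHA,
		},
		[]string{"verb"},
	)

	// Before registration the vector has not been "created", so member
	// extraction returns the permanent noop object and this observation
	// is dropped rather than allocating an orphaned child series.
	latency.WithLabelValues("GET").Observe(0.25)

	reg := metrics.NewKubeRegistry()
	reg.MustRegister(latency) // Create() runs here and instantiates the real prometheus.HistogramVec.

	// From now on extraction yields real child histograms.
	latency.WithLabelValues("GET").Observe(0.25)
}
```

Note the deliberate contrast with upstream client_golang, where the first extraction would have created and retained the child series even if the vector were never registered.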
+ +// WithLabelValues returns the ObserverMetric for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new ObserverMetric is created IFF the HistogramVec +// has been registered to a metrics registry. +func (v *HistogramVec) WithLabelValues(lvs ...string) ObserverMetric { + if !v.IsCreated() { + return noop + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs) + } + return v.HistogramVec.WithLabelValues(lvs...) +} + +// With returns the ObserverMetric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new ObserverMetric is created IFF the HistogramVec has +// been registered to a metrics registry. +func (v *HistogramVec) With(labels map[string]string) ObserverMetric { + if !v.IsCreated() { + return noop + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainLabelMap(labels) + } + return v.HistogramVec.With(labels) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +func (v *HistogramVec) Delete(labels map[string]string) bool { + if !v.IsCreated() { + return false // since we haven't created the metric, we haven't deleted a metric with the passed in values + } + return v.HistogramVec.Delete(labels) +} + +// Reset deletes all metrics in this vector. +func (v *HistogramVec) Reset() { + if !v.IsCreated() { + return + } + + v.HistogramVec.Reset() +} + +// WithContext returns wrapped HistogramVec with context +func (v *HistogramVec) WithContext(ctx context.Context) *HistogramVecWithContext { + return &HistogramVecWithContext{ + ctx: ctx, + HistogramVec: v, + } +} + +// HistogramVecWithContext is the wrapper of HistogramVec with context. +type HistogramVecWithContext struct { + *HistogramVec + ctx context.Context +} + +// WithLabelValues is the wrapper of HistogramVec.WithLabelValues. +func (vc *HistogramVecWithContext) WithLabelValues(lvs ...string) ObserverMetric { + return vc.HistogramVec.WithLabelValues(lvs...) +} + +// With is the wrapper of HistogramVec.With. +func (vc *HistogramVecWithContext) With(labels map[string]string) ObserverMetric { + return vc.HistogramVec.With(labels) +} diff --git a/vendor/k8s.io/component-base/metrics/http.go b/vendor/k8s.io/component-base/metrics/http.go new file mode 100644 index 000000000..2a0d249c2 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/http.go @@ -0,0 +1,87 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metrics + +import ( + "io" + "net/http" + "time" + + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +var ( + processStartedAt time.Time +) + +func init() { + processStartedAt = time.Now() +} + +// These constants cause handlers serving metrics to behave as described if +// errors are encountered. +const ( + // HTTPErrorOnError serve an HTTP status code 500 upon the first error + // encountered. Report the error message in the body. + HTTPErrorOnError promhttp.HandlerErrorHandling = iota + + // ContinueOnError ignore errors and try to serve as many metrics as possible. + // However, if no metrics can be served, serve an HTTP status code 500 and the + // last error message in the body. Only use this in deliberate "best + // effort" metrics collection scenarios. In this case, it is highly + // recommended to provide other means of detecting errors: By setting an + // ErrorLog in HandlerOpts, the errors are logged. By providing a + // Registry in HandlerOpts, the exposed metrics include an error counter + // "promhttp_metric_handler_errors_total", which can be used for + // alerts. + ContinueOnError + + // PanicOnError panics upon the first error encountered (useful for "crash only" apps). + PanicOnError +) + +// HandlerOpts specifies options how to serve metrics via an http.Handler. The +// zero value of HandlerOpts is a reasonable default. +type HandlerOpts promhttp.HandlerOpts + +func (ho *HandlerOpts) toPromhttpHandlerOpts() promhttp.HandlerOpts { + ho.ProcessStartTime = processStartedAt + return promhttp.HandlerOpts(*ho) +} + +// HandlerFor returns an uninstrumented http.Handler for the provided +// Gatherer. The behavior of the Handler is defined by the provided +// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom +// Gatherers, with non-default HandlerOpts, and/or with custom (or no) +// instrumentation. Use the InstrumentMetricHandler function to apply the same +// kind of instrumentation as it is used by the Handler function. +func HandlerFor(reg Gatherer, opts HandlerOpts) http.Handler { + return promhttp.HandlerFor(reg, opts.toPromhttpHandlerOpts()) +} + +// HandlerWithReset return an http.Handler with Reset +func HandlerWithReset(reg KubeRegistry, opts HandlerOpts) http.Handler { + defaultHandler := promhttp.HandlerFor(reg, opts.toPromhttpHandlerOpts()) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == http.MethodDelete { + reg.Reset() + io.WriteString(w, "metrics reset\n") + return + } + defaultHandler.ServeHTTP(w, r) + }) +} diff --git a/vendor/k8s.io/component-base/metrics/labels.go b/vendor/k8s.io/component-base/metrics/labels.go new file mode 100644 index 000000000..11af3ae42 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/labels.go @@ -0,0 +1,22 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import "github.com/prometheus/client_golang/prometheus" + +// Labels represents a collection of label name -> value mappings. 
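+// It is defined in terms of prometheus.Labels so that callers of this package
+// do not have to import the prometheus client directly.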
+type Labels prometheus.Labels diff --git a/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go b/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go new file mode 100644 index 000000000..64a430b79 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/legacyregistry/registry.go @@ -0,0 +1,92 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package legacyregistry + +import ( + "net/http" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/collectors" + "github.com/prometheus/client_golang/prometheus/promhttp" + + "k8s.io/component-base/metrics" +) + +var ( + defaultRegistry = metrics.NewKubeRegistry() + // DefaultGatherer exposes the global registry gatherer + DefaultGatherer metrics.Gatherer = defaultRegistry + // Reset calls reset on the global registry + Reset = defaultRegistry.Reset + // MustRegister registers registerable metrics but uses the global registry. + MustRegister = defaultRegistry.MustRegister + // RawMustRegister registers prometheus collectors but uses the global registry, this + // bypasses the metric stability framework + // + // Deprecated + RawMustRegister = defaultRegistry.RawMustRegister + + // Register registers a collectable metric but uses the global registry + Register = defaultRegistry.Register + + // Registerer exposes the global registerer + Registerer = defaultRegistry.Registerer + + processStart time.Time +) + +func init() { + RawMustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{})) + RawMustRegister(collectors.NewGoCollector(collectors.WithGoCollectorRuntimeMetrics(collectors.MetricsAll))) + defaultRegistry.RegisterMetaMetrics() + processStart = time.Now() +} + +// Handler returns an HTTP handler for the DefaultGatherer. It is +// already instrumented with InstrumentHandler (using "prometheus" as handler +// name). +func Handler() http.Handler { + return promhttp.InstrumentMetricHandler(prometheus.DefaultRegisterer, promhttp.HandlerFor(defaultRegistry, promhttp.HandlerOpts{ProcessStartTime: processStart})) +} + +// HandlerWithReset returns an HTTP handler for the DefaultGatherer but invokes +// registry reset if the http method is DELETE. +func HandlerWithReset() http.Handler { + return promhttp.InstrumentMetricHandler( + prometheus.DefaultRegisterer, + metrics.HandlerWithReset(defaultRegistry, metrics.HandlerOpts{ProcessStartTime: processStart})) +} + +// CustomRegister registers a custom collector but uses the global registry. +func CustomRegister(c metrics.StableCollector) error { + err := defaultRegistry.CustomRegister(c) + + //TODO(RainbowMango): Maybe we can wrap this error by error wrapping.(Golang 1.13) + _ = prometheus.Register(c) + + return err +} + +// CustomMustRegister registers custom collectors but uses the global registry. +func CustomMustRegister(cs ...metrics.StableCollector) { + defaultRegistry.CustomMustRegister(cs...) 
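+	// Mirror the registration into the global prometheus registry below so
+	// that legacy consumers of prometheus.DefaultGatherer continue to see
+	// these collectors.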
+ + for _, c := range cs { + prometheus.MustRegister(c) + } +} diff --git a/vendor/k8s.io/component-base/metrics/metric.go b/vendor/k8s.io/component-base/metrics/metric.go new file mode 100644 index 000000000..3b22d21ef --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/metric.go @@ -0,0 +1,235 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "sync" + + "github.com/blang/semver/v4" + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" + promext "k8s.io/component-base/metrics/prometheusextension" + + "k8s.io/klog/v2" +) + +/* +kubeCollector extends the prometheus.Collector interface to allow customization of the metric +registration process. Defer metric initialization until Create() is called, which then +delegates to the underlying metric's initializeMetric or initializeDeprecatedMetric +method call depending on whether the metric is deprecated or not. +*/ +type kubeCollector interface { + Collector + lazyKubeMetric + DeprecatedVersion() *semver.Version + // Each collector metric should provide an initialization function + // for both deprecated and non-deprecated variants of a metric. This + // is necessary since metric instantiation will be deferred + // until the metric is actually registered somewhere. + initializeMetric() + initializeDeprecatedMetric() +} + +/* +lazyKubeMetric defines our metric registration interface. lazyKubeMetric objects are expected +to lazily instantiate metrics (i.e defer metric instantiation until when +the Create() function is explicitly called). +*/ +type lazyKubeMetric interface { + Create(*semver.Version) bool + IsCreated() bool + IsHidden() bool + IsDeprecated() bool +} + +/* +lazyMetric implements lazyKubeMetric. A lazy metric is lazy because it waits until metric +registration time before instantiation. Add it as an anonymous field to a struct that +implements kubeCollector to get deferred registration behavior. You must call lazyInit +with the kubeCollector itself as an argument. +*/ +type lazyMetric struct { + fqName string + isDeprecated bool + isHidden bool + isCreated bool + createLock sync.RWMutex + markDeprecationOnce sync.Once + createOnce sync.Once + self kubeCollector + stabilityLevel StabilityLevel +} + +func (r *lazyMetric) IsCreated() bool { + r.createLock.RLock() + defer r.createLock.RUnlock() + return r.isCreated +} + +// lazyInit provides the lazyMetric with a reference to the kubeCollector it is supposed +// to allow lazy initialization for. It should be invoked in the factory function which creates new +// kubeCollector type objects. +func (r *lazyMetric) lazyInit(self kubeCollector, fqName string) { + r.fqName = fqName + r.self = self +} + +// preprocessMetric figures out whether the lazy metric should be hidden or not. +// This method takes a Version argument which should be the version of the binary in which +// this code is currently being executed. A metric can be hidden under two conditions: +// 1. 
if the metric is deprecated and is outside the grace period (i.e. has been
+// deprecated for more than one release)
+// 2. if the metric is manually disabled via a CLI flag.
+//
+// Disclaimer: disabling a metric via a CLI flag has higher precedence than
+// deprecation and will override show-hidden-metrics for the explicitly
+// disabled metric.
+func (r *lazyMetric) preprocessMetric(version semver.Version) {
+	disabledMetricsLock.RLock()
+	defer disabledMetricsLock.RUnlock()
+	// disabling metrics is higher in precedence than showing hidden metrics
+	if _, ok := disabledMetrics[r.fqName]; ok {
+		r.isHidden = true
+		return
+	}
+	selfVersion := r.self.DeprecatedVersion()
+	if selfVersion == nil {
+		return
+	}
+	r.markDeprecationOnce.Do(func() {
+		if selfVersion.LTE(version) {
+			r.isDeprecated = true
+		}
+
+		if ShouldShowHidden() {
+			klog.Warningf("Hidden metrics (%s) have been manually overridden, showing this very deprecated metric.", r.fqName)
+			return
+		}
+		if shouldHide(&version, selfVersion) {
+			// TODO(RainbowMango): Remove this log temporarily. https://github.com/kubernetes/kubernetes/issues/85369
+			// klog.Warningf("This metric has been deprecated for more than one release, hiding.")
+			r.isHidden = true
+		}
+	})
+}
+
+func (r *lazyMetric) IsHidden() bool {
+	return r.isHidden
+}
+
+func (r *lazyMetric) IsDeprecated() bool {
+	return r.isDeprecated
+}
+
+// Create forces the initialization of a metric, which has been deferred until
+// the point at which this method is invoked. This method will determine whether
+// the metric is deprecated or hidden, no-opting if the metric should be considered
+// hidden. Furthermore, this function no-opts and returns true if the metric is
+// already created.
+func (r *lazyMetric) Create(version *semver.Version) bool {
+	if version != nil {
+		r.preprocessMetric(*version)
+	}
+	// let's not create if this metric is slated to be hidden
+	if r.IsHidden() {
+		return false
+	}
+
+	r.createOnce.Do(func() {
+		r.createLock.Lock()
+		defer r.createLock.Unlock()
+		r.isCreated = true
+		if r.IsDeprecated() {
+			r.self.initializeDeprecatedMetric()
+		} else {
+			r.self.initializeMetric()
+		}
+	})
+	sl := r.stabilityLevel
+	deprecatedV := r.self.DeprecatedVersion()
+	dv := ""
+	if deprecatedV != nil {
+		dv = deprecatedV.String()
+	}
+	registeredMetrics.WithLabelValues(string(sl), dv).Inc()
+	return r.IsCreated()
+}
+
+// ClearState will clear all the states marked by Create.
+// It is intended to be used for re-registering a hidden metric.
+func (r *lazyMetric) ClearState() {
+	r.createLock.Lock()
+	defer r.createLock.Unlock()
+
+	r.isDeprecated = false
+	r.isHidden = false
+	r.isCreated = false
+	r.markDeprecationOnce = sync.Once{}
+	r.createOnce = sync.Once{}
+}
+
+// FQName returns the fully-qualified metric name of the collector.
+func (r *lazyMetric) FQName() string {
+	return r.fqName
+}
+
+/*
+This code is directly lifted from the prometheus codebase. It's a convenience struct which
+allows you to satisfy the Collector interface automatically if you already satisfy the Metric interface.
+
+For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/collector.go#L98-L120
+*/
+type selfCollector struct {
+	metric prometheus.Metric
+}
+
+func (c *selfCollector) initSelfCollection(m prometheus.Metric) {
+	c.metric = m
+}
+
+func (c *selfCollector) Describe(ch chan<- *prometheus.Desc) {
+	ch <- c.metric.Desc()
+}
+
+func (c *selfCollector) Collect(ch chan<- prometheus.Metric) {
+	ch <- c.metric
+}
+
+// no-op vecs for convenience
+var noopCounterVec = &prometheus.CounterVec{}
+var noopHistogramVec = &prometheus.HistogramVec{}
+var noopTimingHistogramVec = &promext.TimingHistogramVec{}
+var noopGaugeVec = &prometheus.GaugeVec{}
+
+// just use a convenience struct for all the no-ops
+var noop = &noopMetric{}
+
+type noopMetric struct{}
+
+func (noopMetric) Inc()                              {}
+func (noopMetric) Add(float64)                       {}
+func (noopMetric) Dec()                              {}
+func (noopMetric) Set(float64)                       {}
+func (noopMetric) Sub(float64)                       {}
+func (noopMetric) Observe(float64)                   {}
+func (noopMetric) ObserveWithWeight(float64, uint64) {}
+func (noopMetric) SetToCurrentTime()                 {}
+func (noopMetric) Desc() *prometheus.Desc            { return nil }
+func (noopMetric) Write(*dto.Metric) error           { return nil }
+func (noopMetric) Describe(chan<- *prometheus.Desc)  {}
+func (noopMetric) Collect(chan<- prometheus.Metric)  {}
diff --git a/vendor/k8s.io/component-base/metrics/options.go b/vendor/k8s.io/component-base/metrics/options.go
new file mode 100644
index 000000000..7a59b7ba1
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/options.go
@@ -0,0 +1,125 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"fmt"
+	"regexp"
+
+	"github.com/blang/semver/v4"
+	"github.com/spf13/pflag"
+
+	"k8s.io/component-base/version"
+)
+
+// Options has all parameters needed for exposing metrics from components
+type Options struct {
+	ShowHiddenMetricsForVersion string
+	DisabledMetrics             []string
+	AllowListMapping            map[string]string
+}
+
+// NewOptions returns default metrics options
+func NewOptions() *Options {
+	return &Options{}
+}
+
+// Validate validates metrics flags options.
+func (o *Options) Validate() []error {
+	var errs []error
+	err := validateShowHiddenMetricsVersion(parseVersion(version.Get()), o.ShowHiddenMetricsForVersion)
+	if err != nil {
+		errs = append(errs, err)
+	}
+
+	if err := validateAllowMetricLabel(o.AllowListMapping); err != nil {
+		errs = append(errs, err)
+	}
+
+	if len(errs) == 0 {
+		return nil
+	}
+	return errs
+}
+
+// AddFlags adds flags for exposing component metrics.
+func (o *Options) AddFlags(fs *pflag.FlagSet) {
+	if o == nil {
+		return
+	}
+	fs.StringVar(&o.ShowHiddenMetricsForVersion, "show-hidden-metrics-for-version", o.ShowHiddenMetricsForVersion,
+		"The previous version for which you want to show hidden metrics. "+
+			"Only the previous minor version is meaningful, other values will not be allowed. "+
+			"The format is <major>.<minor>, e.g.: '1.16'. "+
+			"The purpose of this format is to make sure you have the opportunity to notice if the next release hides additional metrics, "+
+			"rather than being surprised when they are permanently removed in the release after that.")
+	fs.StringSliceVar(&o.DisabledMetrics,
+		"disabled-metrics",
+		o.DisabledMetrics,
+		"This flag provides an escape hatch for misbehaving metrics. "+
+			"You must provide the fully qualified metric name in order to disable it. "+
+			"Disclaimer: disabling metrics is higher in precedence than showing hidden metrics.")
+	fs.StringToStringVar(&o.AllowListMapping, "allow-metric-labels", o.AllowListMapping,
+		"The map from metric-label to value allow-list of this label. The key's format is <MetricName>,<LabelName>. "+
+			"The value's format is <allowed_value>,<allowed_value>..."+
+			"e.g. metric1,label1='v1,v2,v3', metric1,label2='v1,v2,v3' metric2,label1='v1,v2,v3'.")
+}
+
+// Apply applies parameters into global configuration of metrics.
+func (o *Options) Apply() {
+	if o == nil {
+		return
+	}
+	if len(o.ShowHiddenMetricsForVersion) > 0 {
+		SetShowHidden()
+	}
+	// set disabled metrics
+	for _, metricName := range o.DisabledMetrics {
+		SetDisabledMetric(metricName)
+	}
+	if o.AllowListMapping != nil {
+		SetLabelAllowListFromCLI(o.AllowListMapping)
+	}
+}
+
+func validateShowHiddenMetricsVersion(currentVersion semver.Version, targetVersionStr string) error {
+	if targetVersionStr == "" {
+		return nil
+	}
+
+	validVersionStr := fmt.Sprintf("%d.%d", currentVersion.Major, currentVersion.Minor-1)
+	if targetVersionStr != validVersionStr {
+		return fmt.Errorf("--show-hidden-metrics-for-version must be omitted or have the value '%v'. Only the previous minor version is allowed", validVersionStr)
+	}
+
+	return nil
+}
+
+func validateAllowMetricLabel(allowListMapping map[string]string) error {
+	if allowListMapping == nil {
+		return nil
+	}
+	metricNameRegex := `[a-zA-Z_:][a-zA-Z0-9_:]*`
+	labelRegex := `[a-zA-Z_][a-zA-Z0-9_]*`
+	for k := range allowListMapping {
+		reg := regexp.MustCompile(metricNameRegex + `,` + labelRegex)
+		if reg.FindString(k) != k {
+			return fmt.Errorf("--allow-metric-labels must have a list of kv pairs whose keys have the format `<metricName>,<labelName>`")
+		}
+	}
+	return nil
+}
diff --git a/vendor/k8s.io/component-base/metrics/opts.go b/vendor/k8s.io/component-base/metrics/opts.go
new file mode 100644
index 000000000..49d2d40bb
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/opts.go
@@ -0,0 +1,356 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"k8s.io/apimachinery/pkg/util/sets"
+	promext "k8s.io/component-base/metrics/prometheusextension"
+)
+
+var (
+	labelValueAllowLists = map[string]*MetricLabelAllowList{}
+	allowListLock        sync.RWMutex
+)
+
+// KubeOpts is a superset struct for prometheus.Opts. The prometheus Opts structure
+// is purposefully not embedded here because that would change struct initialization
+// in the manner to which people are currently accustomed.
+//
+// Name must be set to a non-empty string. DeprecatedVersion is defined only
+// if the metric to which these options apply is, in fact, deprecated.
+type KubeOpts struct {
+	Namespace            string
+	Subsystem            string
+	Name                 string
+	Help                 string
+	ConstLabels          map[string]string
+	DeprecatedVersion    string
+	deprecateOnce        sync.Once
+	annotateOnce         sync.Once
+	StabilityLevel       StabilityLevel
+	LabelValueAllowLists *MetricLabelAllowList
+}
+
+// BuildFQName joins the given three name components by "_". Empty name
+// components are ignored. If the name parameter itself is empty, an empty
+// string is returned, no matter what. Metric implementations included in this
+// library use this function internally to generate the fully-qualified metric
+// name from the name component in their Opts. Users of the library will only
+// need this function if they implement their own Metric or instantiate a Desc
+// (with NewDesc) directly.
+func BuildFQName(namespace, subsystem, name string) string {
+	return prometheus.BuildFQName(namespace, subsystem, name)
+}
+
+// StabilityLevel represents the API guarantees for a given defined metric.
+type StabilityLevel string
+
+const (
+	// INTERNAL metrics have no stability guarantees, as such, labels may
+	// be arbitrarily added/removed and the metric may be deleted at any time.
+	INTERNAL StabilityLevel = "INTERNAL"
+	// ALPHA metrics have no stability guarantees, as such, labels may
+	// be arbitrarily added/removed and the metric may be deleted at any time.
+	ALPHA StabilityLevel = "ALPHA"
+	// BETA metrics are governed by the deprecation policy outlined in
+	// the control plane metrics stability KEP.
+	BETA StabilityLevel = "BETA"
+	// STABLE metrics are guaranteed not to be mutated and removal is governed by
+	// the deprecation policy outlined in the control plane metrics stability KEP.
+	STABLE StabilityLevel = "STABLE"
+)
+
+// setDefaults takes 'ALPHA' in case of empty.
+func (sl *StabilityLevel) setDefaults() {
+	switch *sl {
+	case "":
+		*sl = ALPHA
+	default:
+		// no-op, since we have a StabilityLevel already
+	}
+}
+
+// CounterOpts is an alias for Opts. See there for doc comments.
+type CounterOpts KubeOpts
+
+// Modify help description on the metric description.
+func (o *CounterOpts) markDeprecated() {
+	o.deprecateOnce.Do(func() {
+		o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help)
+	})
+}
+
+// annotateStabilityLevel annotates help description on the metric description with the stability level
+// of the metric
+func (o *CounterOpts) annotateStabilityLevel() {
+	o.annotateOnce.Do(func() {
+		o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help)
+	})
+}
+
+// convenience function to allow easy transformation to the prometheus
+// counterpart. This will do more once we have a proper label abstraction
+func (o *CounterOpts) toPromCounterOpts() prometheus.CounterOpts {
+	return prometheus.CounterOpts{
+		Namespace:   o.Namespace,
+		Subsystem:   o.Subsystem,
+		Name:        o.Name,
+		Help:        o.Help,
+		ConstLabels: o.ConstLabels,
+	}
+}
+
+// GaugeOpts is an alias for Opts. See there for doc comments.
+type GaugeOpts KubeOpts
+
+// Modify help description on the metric description.
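+// The "(Deprecated since ...)" prefix is applied to Help at most once, guarded by deprecateOnce.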
+func (o *GaugeOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *GaugeOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// convenience function to allow easy transformation to the prometheus +// counterpart. This will do more once we have a proper label abstraction +func (o *GaugeOpts) toPromGaugeOpts() prometheus.GaugeOpts { + return prometheus.GaugeOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + } +} + +// HistogramOpts bundles the options for creating a Histogram metric. It is +// mandatory to set Name to a non-empty string. All other fields are optional +// and can safely be left at their zero value, although it is strongly +// encouraged to set a Help string. +type HistogramOpts struct { + Namespace string + Subsystem string + Name string + Help string + ConstLabels map[string]string + Buckets []float64 + DeprecatedVersion string + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel + LabelValueAllowLists *MetricLabelAllowList +} + +// Modify help description on the metric description. +func (o *HistogramOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *HistogramOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// convenience function to allow easy transformation to the prometheus +// counterpart. This will do more once we have a proper label abstraction +func (o *HistogramOpts) toPromHistogramOpts() prometheus.HistogramOpts { + return prometheus.HistogramOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + Buckets: o.Buckets, + } +} + +// TimingHistogramOpts bundles the options for creating a TimingHistogram metric. It is +// mandatory to set Name to a non-empty string. All other fields are optional +// and can safely be left at their zero value, although it is strongly +// encouraged to set a Help string. +type TimingHistogramOpts struct { + Namespace string + Subsystem string + Name string + Help string + ConstLabels map[string]string + Buckets []float64 + InitialValue float64 + DeprecatedVersion string + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel + LabelValueAllowLists *MetricLabelAllowList +} + +// Modify help description on the metric description. +func (o *TimingHistogramOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *TimingHistogramOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// convenience function to allow easy transformation to the prometheus +// counterpart. 
This will do more once we have a proper label abstraction +func (o *TimingHistogramOpts) toPromHistogramOpts() promext.TimingHistogramOpts { + return promext.TimingHistogramOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + Buckets: o.Buckets, + InitialValue: o.InitialValue, + } +} + +// SummaryOpts bundles the options for creating a Summary metric. It is +// mandatory to set Name to a non-empty string. While all other fields are +// optional and can safely be left at their zero value, it is recommended to set +// a help string and to explicitly set the Objectives field to the desired value +// as the default value will change in the upcoming v0.10 of the library. +type SummaryOpts struct { + Namespace string + Subsystem string + Name string + Help string + ConstLabels map[string]string + Objectives map[float64]float64 + MaxAge time.Duration + AgeBuckets uint32 + BufCap uint32 + DeprecatedVersion string + deprecateOnce sync.Once + annotateOnce sync.Once + StabilityLevel StabilityLevel + LabelValueAllowLists *MetricLabelAllowList +} + +// Modify help description on the metric description. +func (o *SummaryOpts) markDeprecated() { + o.deprecateOnce.Do(func() { + o.Help = fmt.Sprintf("(Deprecated since %v) %v", o.DeprecatedVersion, o.Help) + }) +} + +// annotateStabilityLevel annotates help description on the metric description with the stability level +// of the metric +func (o *SummaryOpts) annotateStabilityLevel() { + o.annotateOnce.Do(func() { + o.Help = fmt.Sprintf("[%v] %v", o.StabilityLevel, o.Help) + }) +} + +// Deprecated: DefObjectives will not be used as the default objectives in +// v1.0.0 of the library. The default Summary will have no quantiles then. +var ( + defObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} +) + +// convenience function to allow easy transformation to the prometheus +// counterpart. This will do more once we have a proper label abstraction +func (o *SummaryOpts) toPromSummaryOpts() prometheus.SummaryOpts { + // we need to retain existing quantile behavior for backwards compatibility, + // so let's do what prometheus used to do prior to v1. 
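+	// (that is, fall back to the historical defObjectives when none are specified).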
+ objectives := o.Objectives + if objectives == nil { + objectives = defObjectives + } + return prometheus.SummaryOpts{ + Namespace: o.Namespace, + Subsystem: o.Subsystem, + Name: o.Name, + Help: o.Help, + ConstLabels: o.ConstLabels, + Objectives: objectives, + MaxAge: o.MaxAge, + AgeBuckets: o.AgeBuckets, + BufCap: o.BufCap, + } +} + +type MetricLabelAllowList struct { + labelToAllowList map[string]sets.String +} + +func (allowList *MetricLabelAllowList) ConstrainToAllowedList(labelNameList, labelValueList []string) { + for index, value := range labelValueList { + name := labelNameList[index] + if allowValues, ok := allowList.labelToAllowList[name]; ok { + if !allowValues.Has(value) { + labelValueList[index] = "unexpected" + } + } + } +} + +func (allowList *MetricLabelAllowList) ConstrainLabelMap(labels map[string]string) { + for name, value := range labels { + if allowValues, ok := allowList.labelToAllowList[name]; ok { + if !allowValues.Has(value) { + labels[name] = "unexpected" + } + } + } +} + +func SetLabelAllowListFromCLI(allowListMapping map[string]string) { + allowListLock.Lock() + defer allowListLock.Unlock() + for metricLabelName, labelValues := range allowListMapping { + metricName := strings.Split(metricLabelName, ",")[0] + labelName := strings.Split(metricLabelName, ",")[1] + valueSet := sets.NewString(strings.Split(labelValues, ",")...) + + allowList, ok := labelValueAllowLists[metricName] + if ok { + allowList.labelToAllowList[labelName] = valueSet + } else { + labelToAllowList := make(map[string]sets.String) + labelToAllowList[labelName] = valueSet + labelValueAllowLists[metricName] = &MetricLabelAllowList{ + labelToAllowList, + } + } + } +} diff --git a/vendor/k8s.io/component-base/metrics/processstarttime.go b/vendor/k8s.io/component-base/metrics/processstarttime.go new file mode 100644 index 000000000..4b5e76935 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/processstarttime.go @@ -0,0 +1,51 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "time" + + "k8s.io/klog/v2" +) + +var processStartTime = NewGaugeVec( + &GaugeOpts{ + Name: "process_start_time_seconds", + Help: "Start time of the process since unix epoch in seconds.", + StabilityLevel: ALPHA, + }, + []string{}, +) + +// RegisterProcessStartTime registers the process_start_time_seconds to +// a prometheus registry. This metric needs to be included to ensure counter +// data fidelity. +func RegisterProcessStartTime(registrationFunc func(Registerable) error) error { + start, err := getProcessStart() + if err != nil { + klog.Errorf("Could not get process start time, %v", err) + start = float64(time.Now().Unix()) + } + // processStartTime is a lazy metric which only get initialized after registered. 
+ // so we need to register the metric first and then set the value for it + if err = registrationFunc(processStartTime); err != nil { + return err + } + + processStartTime.WithLabelValues().Set(start) + return nil +} diff --git a/vendor/k8s.io/component-base/metrics/processstarttime_others.go b/vendor/k8s.io/component-base/metrics/processstarttime_others.go new file mode 100644 index 000000000..a14cd8833 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/processstarttime_others.go @@ -0,0 +1,39 @@ +//go:build !windows +// +build !windows + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "os" + + "github.com/prometheus/procfs" +) + +func getProcessStart() (float64, error) { + pid := os.Getpid() + p, err := procfs.NewProc(pid) + if err != nil { + return 0, err + } + + if stat, err := p.Stat(); err == nil { + return stat.StartTime() + } + return 0, err +} diff --git a/vendor/k8s.io/component-base/metrics/processstarttime_windows.go b/vendor/k8s.io/component-base/metrics/processstarttime_windows.go new file mode 100644 index 000000000..7813115e7 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/processstarttime_windows.go @@ -0,0 +1,34 @@ +//go:build windows +// +build windows + +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "golang.org/x/sys/windows" +) + +func getProcessStart() (float64, error) { + processHandle := windows.CurrentProcess() + + var creationTime, exitTime, kernelTime, userTime windows.Filetime + if err := windows.GetProcessTimes(processHandle, &creationTime, &exitTime, &kernelTime, &userTime); err != nil { + return 0, err + } + return float64(creationTime.Nanoseconds() / 1e9), nil +} diff --git a/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram.go b/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram.go new file mode 100644 index 000000000..be07977e2 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram.go @@ -0,0 +1,189 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package prometheusextension
+
+import (
+	"errors"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+	dto "github.com/prometheus/client_model/go"
+)
+
+// GaugeOps is the part of `prometheus.Gauge` that is relevant to
+// instrumented code.
+// This factoring should be in prometheus, analogous to the way
+// it already factors out the Observer interface for histograms and summaries.
+type GaugeOps interface {
+	// Set is the same as Gauge.Set
+	Set(float64)
+	// Inc is the same as Gauge.Inc
+	Inc()
+	// Dec is the same as Gauge.Dec
+	Dec()
+	// Add is the same as Gauge.Add
+	Add(float64)
+	// Sub is the same as Gauge.Sub
+	Sub(float64)
+
+	// SetToCurrentTime is the same as Gauge.SetToCurrentTime
+	SetToCurrentTime()
+}
+
+// A TimingHistogram tracks how long a `float64` variable spends in
+// ranges defined by buckets. Time is counted in nanoseconds. The
+// histogram's sum is the integral over time (in nanoseconds, from
+// creation of the histogram) of the variable's value.
+type TimingHistogram interface {
+	prometheus.Metric
+	prometheus.Collector
+	GaugeOps
+}
+
+// TimingHistogramOpts is the parameters of the TimingHistogram constructor
+type TimingHistogramOpts struct {
+	Namespace   string
+	Subsystem   string
+	Name        string
+	Help        string
+	ConstLabels prometheus.Labels
+
+	// Buckets defines the buckets into which observations are
+	// accumulated. Each element in the slice is the upper
+	// inclusive bound of a bucket. The values must be sorted in
+	// strictly increasing order. There is no need to add a
+	// highest bucket with +Inf bound. The default value is
+	// prometheus.DefBuckets.
+	Buckets []float64
+
+	// The initial value of the variable.
+	InitialValue float64
+}
+
+// NewTimingHistogram creates a new TimingHistogram
+func NewTimingHistogram(opts TimingHistogramOpts) (TimingHistogram, error) {
+	return NewTestableTimingHistogram(time.Now, opts)
+}
+
+// NewTestableTimingHistogram creates a TimingHistogram that uses a mockable clock
+func NewTestableTimingHistogram(nowFunc func() time.Time, opts TimingHistogramOpts) (TimingHistogram, error) {
+	desc := prometheus.NewDesc(
+		prometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+		wrapTimingHelp(opts.Help),
+		nil,
+		opts.ConstLabels,
+	)
+	return newTimingHistogram(nowFunc, desc, opts)
+}
+
+func wrapTimingHelp(given string) string {
+	return "EXPERIMENTAL: " + given
+}
+
+func newTimingHistogram(nowFunc func() time.Time, desc *prometheus.Desc, opts TimingHistogramOpts, variableLabelValues ...string) (TimingHistogram, error) {
+	allLabelsM := prometheus.Labels{}
+	allLabelsS := prometheus.MakeLabelPairs(desc, variableLabelValues)
+	for _, pair := range allLabelsS {
+		if pair == nil || pair.Name == nil || pair.Value == nil {
+			return nil, errors.New("prometheus.MakeLabelPairs returned a nil")
+		}
+		allLabelsM[*pair.Name] = *pair.Value
+	}
+	weighted, err := newWeightedHistogram(desc, WeightedHistogramOpts{
+		Namespace:   opts.Namespace,
+		Subsystem:   opts.Subsystem,
+		Name:        opts.Name,
+		Help:        opts.Help,
+		ConstLabels: allLabelsM,
+		Buckets:     opts.Buckets,
+	}, variableLabelValues...)
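+	// The timing histogram delegates all bucket bookkeeping to the weighted
+	// histogram: on every update the previous value is observed with a weight
+	// equal to the number of nanoseconds it was in effect.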
+ if err != nil { + return nil, err + } + return &timingHistogram{ + nowFunc: nowFunc, + weighted: weighted, + lastSetTime: nowFunc(), + value: opts.InitialValue, + }, nil +} + +type timingHistogram struct { + nowFunc func() time.Time + weighted *weightedHistogram + + // The following fields must only be accessed with weighted's lock held + + lastSetTime time.Time // identifies when value was last set + value float64 +} + +var _ TimingHistogram = &timingHistogram{} + +func (th *timingHistogram) Set(newValue float64) { + th.update(func(float64) float64 { return newValue }) +} + +func (th *timingHistogram) Inc() { + th.update(func(oldValue float64) float64 { return oldValue + 1 }) +} + +func (th *timingHistogram) Dec() { + th.update(func(oldValue float64) float64 { return oldValue - 1 }) +} + +func (th *timingHistogram) Add(delta float64) { + th.update(func(oldValue float64) float64 { return oldValue + delta }) +} + +func (th *timingHistogram) Sub(delta float64) { + th.update(func(oldValue float64) float64 { return oldValue - delta }) +} + +func (th *timingHistogram) SetToCurrentTime() { + th.update(func(oldValue float64) float64 { return th.nowFunc().Sub(time.Unix(0, 0)).Seconds() }) +} + +func (th *timingHistogram) update(updateFn func(float64) float64) { + th.weighted.lock.Lock() + defer th.weighted.lock.Unlock() + now := th.nowFunc() + delta := now.Sub(th.lastSetTime) + value := th.value + if delta > 0 { + th.weighted.observeWithWeightLocked(value, uint64(delta)) + th.lastSetTime = now + } + th.value = updateFn(value) +} + +func (th *timingHistogram) Desc() *prometheus.Desc { + return th.weighted.Desc() +} + +func (th *timingHistogram) Write(dest *dto.Metric) error { + th.Add(0) // account for time since last update + return th.weighted.Write(dest) +} + +func (th *timingHistogram) Describe(ch chan<- *prometheus.Desc) { + ch <- th.weighted.Desc() +} + +func (th *timingHistogram) Collect(ch chan<- prometheus.Metric) { + ch <- th +} diff --git a/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram_vec.go b/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram_vec.go new file mode 100644 index 000000000..7af1a4586 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/prometheusextension/timing_histogram_vec.go @@ -0,0 +1,111 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package prometheusextension + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" +) + +// GaugeVecOps is a bunch of Gauge that have the same +// Desc and are distinguished by the values for their variable labels. 
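+// It is implemented below by TimingHistogramVec.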
+type GaugeVecOps interface { + GetMetricWith(prometheus.Labels) (GaugeOps, error) + GetMetricWithLabelValues(lvs ...string) (GaugeOps, error) + With(prometheus.Labels) GaugeOps + WithLabelValues(...string) GaugeOps + CurryWith(prometheus.Labels) (GaugeVecOps, error) + MustCurryWith(prometheus.Labels) GaugeVecOps +} + +type TimingHistogramVec struct { + *prometheus.MetricVec +} + +var _ GaugeVecOps = &TimingHistogramVec{} +var _ prometheus.Collector = &TimingHistogramVec{} + +func NewTimingHistogramVec(opts TimingHistogramOpts, labelNames ...string) *TimingHistogramVec { + return NewTestableTimingHistogramVec(time.Now, opts, labelNames...) +} + +func NewTestableTimingHistogramVec(nowFunc func() time.Time, opts TimingHistogramOpts, labelNames ...string) *TimingHistogramVec { + desc := prometheus.NewDesc( + prometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + wrapTimingHelp(opts.Help), + labelNames, + opts.ConstLabels, + ) + return &TimingHistogramVec{ + MetricVec: prometheus.NewMetricVec(desc, func(lvs ...string) prometheus.Metric { + metric, err := newTimingHistogram(nowFunc, desc, opts, lvs...) + if err != nil { + panic(err) // like in prometheus.newHistogram + } + return metric + }), + } +} + +func (hv *TimingHistogramVec) GetMetricWith(labels prometheus.Labels) (GaugeOps, error) { + metric, err := hv.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(GaugeOps), err + } + return nil, err +} + +func (hv *TimingHistogramVec) GetMetricWithLabelValues(lvs ...string) (GaugeOps, error) { + metric, err := hv.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(GaugeOps), err + } + return nil, err +} + +func (hv *TimingHistogramVec) With(labels prometheus.Labels) GaugeOps { + h, err := hv.GetMetricWith(labels) + if err != nil { + panic(err) + } + return h +} + +func (hv *TimingHistogramVec) WithLabelValues(lvs ...string) GaugeOps { + h, err := hv.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return h +} + +func (hv *TimingHistogramVec) CurryWith(labels prometheus.Labels) (GaugeVecOps, error) { + vec, err := hv.MetricVec.CurryWith(labels) + if vec != nil { + return &TimingHistogramVec{MetricVec: vec}, err + } + return nil, err +} + +func (hv *TimingHistogramVec) MustCurryWith(labels prometheus.Labels) GaugeVecOps { + vec, err := hv.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} diff --git a/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram.go b/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram.go new file mode 100644 index 000000000..a060019b2 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram.go @@ -0,0 +1,203 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package prometheusextension + +import ( + "fmt" + "math" + "sort" + "sync" + + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" +) + +// WeightedHistogram generalizes Histogram: each observation has +// an associated _weight_. For a given `x` and `N`, +// `1` call on `ObserveWithWeight(x, N)` has the same meaning as +// `N` calls on `ObserveWithWeight(x, 1)`. +// The weighted sum might differ slightly due to the use of +// floating point, although the implementation takes some steps +// to mitigate that. +// If every weight were 1, +// this would be the same as the existing Histogram abstraction. +type WeightedHistogram interface { + prometheus.Metric + prometheus.Collector + WeightedObserver +} + +// WeightedObserver generalizes the Observer interface. +type WeightedObserver interface { + // Set the variable to the given value with the given weight. + ObserveWithWeight(value float64, weight uint64) +} + +// WeightedHistogramOpts is the same as for an ordinary Histogram +type WeightedHistogramOpts = prometheus.HistogramOpts + +// NewWeightedHistogram creates a new WeightedHistogram +func NewWeightedHistogram(opts WeightedHistogramOpts) (WeightedHistogram, error) { + desc := prometheus.NewDesc( + prometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + wrapWeightedHelp(opts.Help), + nil, + opts.ConstLabels, + ) + return newWeightedHistogram(desc, opts) +} + +func wrapWeightedHelp(given string) string { + return "EXPERIMENTAL: " + given +} + +func newWeightedHistogram(desc *prometheus.Desc, opts WeightedHistogramOpts, variableLabelValues ...string) (*weightedHistogram, error) { + if len(opts.Buckets) == 0 { + opts.Buckets = prometheus.DefBuckets + } + + for i, upperBound := range opts.Buckets { + if i < len(opts.Buckets)-1 { + if upperBound >= opts.Buckets[i+1] { + return nil, fmt.Errorf( + "histogram buckets must be in increasing order: %f >= %f", + upperBound, opts.Buckets[i+1], + ) + } + } else { + if math.IsInf(upperBound, +1) { + // The +Inf bucket is implicit. Remove it here. + opts.Buckets = opts.Buckets[:i] + } + } + } + upperBounds := make([]float64, len(opts.Buckets)) + copy(upperBounds, opts.Buckets) + + return &weightedHistogram{ + desc: desc, + variableLabelValues: variableLabelValues, + upperBounds: upperBounds, + buckets: make([]uint64, len(upperBounds)+1), + hotCount: initialHotCount, + }, nil +} + +type weightedHistogram struct { + desc *prometheus.Desc + variableLabelValues []string + upperBounds []float64 // exclusive of +Inf + + lock sync.Mutex // applies to all the following + + // buckets is longer by one than upperBounds. + // For 0 <= idx < len(upperBounds), buckets[idx] holds the + // accumulated time.Duration that value has been <= + // upperBounds[idx] but not <= upperBounds[idx-1]. + // buckets[len(upperBounds)] holds the accumulated + // time.Duration when value fit in no other bucket. + buckets []uint64 + + // sumHot + sumCold is the weighted sum of value. + // Rather than risk loss of precision in one + // float64, we do this sum hierarchically. Many successive + // increments are added into sumHot; once in a while + // the magnitude of sumHot is compared to the magnitude + // of sumCold and, if the ratio is high enough, + // sumHot is transferred into sumCold. + sumHot float64 + sumCold float64 + + transferThreshold float64 // = math.Abs(sumCold) / 2^26 (that's about half of the bits of precision in a float64) + + // hotCount is used to decide when to consider dumping sumHot into sumCold. 
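+	// (a simple counter is cheaper than checking the magnitudes on every update).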
+ // hotCount counts upward from initialHotCount to zero. + hotCount int +} + +// initialHotCount is the negative of the number of terms +// that are summed into sumHot before considering whether +// to transfer to sumCold. This only has to be big enough +// to make the extra floating point operations occur in a +// distinct minority of cases. +const initialHotCount = -15 + +var _ WeightedHistogram = &weightedHistogram{} +var _ prometheus.Metric = &weightedHistogram{} +var _ prometheus.Collector = &weightedHistogram{} + +func (sh *weightedHistogram) ObserveWithWeight(value float64, weight uint64) { + idx := sort.SearchFloat64s(sh.upperBounds, value) + sh.lock.Lock() + defer sh.lock.Unlock() + sh.updateLocked(idx, value, weight) +} + +func (sh *weightedHistogram) observeWithWeightLocked(value float64, weight uint64) { + idx := sort.SearchFloat64s(sh.upperBounds, value) + sh.updateLocked(idx, value, weight) +} + +func (sh *weightedHistogram) updateLocked(idx int, value float64, weight uint64) { + sh.buckets[idx] += weight + newSumHot := sh.sumHot + float64(weight)*value + sh.hotCount++ + if sh.hotCount >= 0 { + sh.hotCount = initialHotCount + if math.Abs(newSumHot) > sh.transferThreshold { + newSumCold := sh.sumCold + newSumHot + sh.sumCold = newSumCold + sh.transferThreshold = math.Abs(newSumCold / 67108864) + sh.sumHot = 0 + return + } + } + sh.sumHot = newSumHot +} + +func (sh *weightedHistogram) Desc() *prometheus.Desc { + return sh.desc +} + +func (sh *weightedHistogram) Write(dest *dto.Metric) error { + count, sum, buckets := func() (uint64, float64, map[float64]uint64) { + sh.lock.Lock() + defer sh.lock.Unlock() + nBounds := len(sh.upperBounds) + buckets := make(map[float64]uint64, nBounds) + var count uint64 + for idx, upperBound := range sh.upperBounds { + count += sh.buckets[idx] + buckets[upperBound] = count + } + count += sh.buckets[nBounds] + return count, sh.sumHot + sh.sumCold, buckets + }() + metric, err := prometheus.NewConstHistogram(sh.desc, count, sum, buckets, sh.variableLabelValues...) + if err != nil { + return err + } + return metric.Write(dest) +} + +func (sh *weightedHistogram) Describe(ch chan<- *prometheus.Desc) { + ch <- sh.desc +} + +func (sh *weightedHistogram) Collect(ch chan<- prometheus.Metric) { + ch <- sh +} diff --git a/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram_vec.go b/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram_vec.go new file mode 100644 index 000000000..2ca95f0a7 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/prometheusextension/weighted_histogram_vec.go @@ -0,0 +1,106 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package prometheusextension + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +// WeightedObserverVec is a bunch of WeightedObservers that have the same +// Desc and are distinguished by the values for their variable labels. 
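+// It is implemented below by WeightedHistogramVec.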
+type WeightedObserverVec interface { + GetMetricWith(prometheus.Labels) (WeightedObserver, error) + GetMetricWithLabelValues(lvs ...string) (WeightedObserver, error) + With(prometheus.Labels) WeightedObserver + WithLabelValues(...string) WeightedObserver + CurryWith(prometheus.Labels) (WeightedObserverVec, error) + MustCurryWith(prometheus.Labels) WeightedObserverVec +} + +// WeightedHistogramVec implements WeightedObserverVec +type WeightedHistogramVec struct { + *prometheus.MetricVec +} + +var _ WeightedObserverVec = &WeightedHistogramVec{} +var _ prometheus.Collector = &WeightedHistogramVec{} + +func NewWeightedHistogramVec(opts WeightedHistogramOpts, labelNames ...string) *WeightedHistogramVec { + desc := prometheus.NewDesc( + prometheus.BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + wrapWeightedHelp(opts.Help), + labelNames, + opts.ConstLabels, + ) + return &WeightedHistogramVec{ + MetricVec: prometheus.NewMetricVec(desc, func(lvs ...string) prometheus.Metric { + metric, err := newWeightedHistogram(desc, opts, lvs...) + if err != nil { + panic(err) // like in prometheus.newHistogram + } + return metric + }), + } +} + +func (hv *WeightedHistogramVec) GetMetricWith(labels prometheus.Labels) (WeightedObserver, error) { + metric, err := hv.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(WeightedObserver), err + } + return nil, err +} + +func (hv *WeightedHistogramVec) GetMetricWithLabelValues(lvs ...string) (WeightedObserver, error) { + metric, err := hv.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(WeightedObserver), err + } + return nil, err +} + +func (hv *WeightedHistogramVec) With(labels prometheus.Labels) WeightedObserver { + h, err := hv.GetMetricWith(labels) + if err != nil { + panic(err) + } + return h +} + +func (hv *WeightedHistogramVec) WithLabelValues(lvs ...string) WeightedObserver { + h, err := hv.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return h +} + +func (hv *WeightedHistogramVec) CurryWith(labels prometheus.Labels) (WeightedObserverVec, error) { + vec, err := hv.MetricVec.CurryWith(labels) + if vec != nil { + return &WeightedHistogramVec{MetricVec: vec}, err + } + return nil, err +} + +func (hv *WeightedHistogramVec) MustCurryWith(labels prometheus.Labels) WeightedObserverVec { + vec, err := hv.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} diff --git a/vendor/k8s.io/component-base/metrics/registry.go b/vendor/k8s.io/component-base/metrics/registry.go new file mode 100644 index 000000000..1942f9958 --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/registry.go @@ -0,0 +1,385 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package metrics
+
+import (
+	"fmt"
+	"sync"
+	"sync/atomic"
+
+	"github.com/blang/semver/v4"
+	"github.com/prometheus/client_golang/prometheus"
+	dto "github.com/prometheus/client_model/go"
+
+	apimachineryversion "k8s.io/apimachinery/pkg/version"
+	"k8s.io/component-base/version"
+)
+
+var (
+	showHiddenOnce      sync.Once
+	disabledMetricsLock sync.RWMutex
+	showHidden          atomic.Bool
+	registries          []*kubeRegistry // stores all registries created by NewKubeRegistry()
+	registriesLock      sync.RWMutex
+	disabledMetrics     = map[string]struct{}{}
+
+	registeredMetrics = NewCounterVec(
+		&CounterOpts{
+			Name:           "registered_metrics_total",
+			Help:           "The count of registered metrics broken down by stability level and deprecation version.",
+			StabilityLevel: BETA,
+		},
+		[]string{"stability_level", "deprecated_version"},
+	)
+
+	disabledMetricsTotal = NewCounter(
+		&CounterOpts{
+			Name:           "disabled_metrics_total",
+			Help:           "The count of disabled metrics.",
+			StabilityLevel: BETA,
+		},
+	)
+
+	hiddenMetricsTotal = NewCounter(
+		&CounterOpts{
+			Name:           "hidden_metrics_total",
+			Help:           "The count of hidden metrics.",
+			StabilityLevel: BETA,
+		},
+	)
+)
+
+// shouldHide is used to check whether a specific metric with a deprecated version
+// should be hidden according to the metrics deprecation lifecycle.
+func shouldHide(currentVersion *semver.Version, deprecatedVersion *semver.Version) bool {
+	guardVersion, err := semver.Make(fmt.Sprintf("%d.%d.0", currentVersion.Major, currentVersion.Minor))
+	if err != nil {
+		panic("failed to make version from current version")
+	}
+
+	if deprecatedVersion.LT(guardVersion) {
+		return true
+	}
+
+	return false
+}
+
+// ValidateShowHiddenMetricsVersion checks whether the given version is valid
+// as a version for which to show hidden metrics.
+func ValidateShowHiddenMetricsVersion(v string) []error {
+	err := validateShowHiddenMetricsVersion(parseVersion(version.Get()), v)
+	if err != nil {
+		return []error{err}
+	}
+
+	return nil
+}
+
+// SetDisabledMetric records the metric with the given name as disabled.
+func SetDisabledMetric(name string) {
+	disabledMetricsLock.Lock()
+	defer disabledMetricsLock.Unlock()
+	disabledMetrics[name] = struct{}{}
+	disabledMetricsTotal.Inc()
+}
+
+// SetShowHidden will enable showing hidden metrics. This is a no-op
+// after the initial call.
+func SetShowHidden() {
+	showHiddenOnce.Do(func() {
+		showHidden.Store(true)
+
+		// re-register collectors that were hidden during the last registration phase.
+		for _, r := range registries {
+			r.enableHiddenCollectors()
+			r.enableHiddenStableCollectors()
+		}
+	})
+}
+
+// ShouldShowHidden returns whether showing hidden deprecated metrics
+// is enabled. While the primary use case for this is internal (to determine
+// registration behavior), it can also be used to introspect the current state.
+func ShouldShowHidden() bool {
+	return showHidden.Load()
+}
+
+// Registerable is an interface for a collector metric which we
+// will register with KubeRegistry.
+type Registerable interface {
+	prometheus.Collector
+
+	// Create will mark the deprecated state for the collector
+	Create(version *semver.Version) bool
+
+	// ClearState will clear all the states marked by Create.
+	ClearState()
+
+	// FQName returns the fully-qualified metric name of the collector.
+	FQName() string
+}
+
+type resettable interface {
+	Reset()
+}
+
+// KubeRegistry is an interface which implements a subset of the prometheus.Registerer and
+// prometheus.Gatherer interfaces
+type KubeRegistry interface {
+	// Deprecated
+	RawMustRegister(...prometheus.Collector)
+	// CustomRegister is our internal variant of Prometheus registry.Register
+	CustomRegister(c StableCollector) error
+	// CustomMustRegister is our internal variant of Prometheus registry.MustRegister
+	CustomMustRegister(cs ...StableCollector)
+	// Register conforms to Prometheus registry.Register
+	Register(Registerable) error
+	// MustRegister conforms to Prometheus registry.MustRegister
+	MustRegister(...Registerable)
+	// Unregister conforms to Prometheus registry.Unregister
+	Unregister(collector Collector) bool
+	// Gather conforms to Prometheus gatherer.Gather
+	Gather() ([]*dto.MetricFamily, error)
+	// Reset invokes the Reset() function on all items in the registry
+	// which are added as resettables.
+	Reset()
+	// RegisterMetaMetrics registers metrics about the number of registered metrics.
+	RegisterMetaMetrics()
+	// Registerer exposes the underlying prometheus registerer
+	Registerer() prometheus.Registerer
+	// Gatherer exposes the underlying prometheus gatherer
+	Gatherer() prometheus.Gatherer
+}
+
+// kubeRegistry is a wrapper around a prometheus registry-type object. Upon initialization
+// the kubernetes binary version information is loaded into the registry object, so that
+// automatic behavior can be configured for metric versioning.
+type kubeRegistry struct {
+	PromRegistry
+	version              semver.Version
+	hiddenCollectors     map[string]Registerable // stores all collectors that have been hidden
+	stableCollectors     []StableCollector       // stores all stable collectors
+	hiddenCollectorsLock sync.RWMutex
+	stableCollectorsLock sync.RWMutex
+	resetLock            sync.RWMutex
+	resettables          []resettable
+}
+
+// Register registers a new Collector to be included in metrics
+// collection. It returns an error if the descriptors provided by the
+// Collector are invalid or if they — in combination with descriptors of
+// already registered Collectors — do not fulfill the consistency and
+// uniqueness criteria described in the documentation of metric.Desc.
+func (kr *kubeRegistry) Register(c Registerable) error {
+	if c.Create(&kr.version) {
+		defer kr.addResettable(c)
+		return kr.PromRegistry.Register(c)
+	}
+
+	kr.trackHiddenCollector(c)
+	return nil
+}
+
+// Registerer exposes the underlying prometheus.Registerer
+func (kr *kubeRegistry) Registerer() prometheus.Registerer {
+	return kr.PromRegistry
+}
+
+// Gatherer exposes the underlying prometheus.Gatherer
+func (kr *kubeRegistry) Gatherer() prometheus.Gatherer {
+	return kr.PromRegistry
+}
+
+// MustRegister works like Register but registers any number of
+// Collectors and panics upon the first registration that causes an
+// error.
+func (kr *kubeRegistry) MustRegister(cs ...Registerable) {
+	metrics := make([]prometheus.Collector, 0, len(cs))
+	for _, c := range cs {
+		if c.Create(&kr.version) {
+			metrics = append(metrics, c)
+			kr.addResettable(c)
+		} else {
+			kr.trackHiddenCollector(c)
+		}
+	}
+	kr.PromRegistry.MustRegister(metrics...)
+}
+
+// CustomRegister registers a new custom collector.
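+// A sketch of intended use, assuming sc is some StableCollector
+// implementation (hypothetical):
+//
+//	var reg KubeRegistry = NewKubeRegistry()
+//	if err := reg.CustomRegister(sc); err != nil {
+//		// e.g. a collector with these descriptors was already registered
+//	}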
+func (kr *kubeRegistry) CustomRegister(c StableCollector) error { + kr.trackStableCollectors(c) + defer kr.addResettable(c) + if c.Create(&kr.version, c) { + return kr.PromRegistry.Register(c) + } + return nil +} + +// CustomMustRegister works like CustomRegister but registers any number of +// StableCollectors and panics upon the first registration that causes an +// error. +func (kr *kubeRegistry) CustomMustRegister(cs ...StableCollector) { + kr.trackStableCollectors(cs...) + collectors := make([]prometheus.Collector, 0, len(cs)) + for _, c := range cs { + if c.Create(&kr.version, c) { + kr.addResettable(c) + collectors = append(collectors, c) + } + } + kr.PromRegistry.MustRegister(collectors...) +} + +// RawMustRegister takes a native prometheus.Collector and registers the collector +// to the registry. This bypasses metrics safety checks, so should only be used +// to register custom prometheus collectors. +// +// Deprecated +func (kr *kubeRegistry) RawMustRegister(cs ...prometheus.Collector) { + kr.PromRegistry.MustRegister(cs...) + for _, c := range cs { + kr.addResettable(c) + } +} + +// addResettable will automatically add our metric to our reset +// list if it satisfies the interface +func (kr *kubeRegistry) addResettable(i interface{}) { + kr.resetLock.Lock() + defer kr.resetLock.Unlock() + if resettable, ok := i.(resettable); ok { + kr.resettables = append(kr.resettables, resettable) + } +} + +// Unregister unregisters the Collector that equals the Collector passed +// in as an argument. (Two Collectors are considered equal if their +// Describe method yields the same set of descriptors.) The function +// returns whether a Collector was unregistered. Note that an unchecked +// Collector cannot be unregistered (as its Describe method does not +// yield any descriptor). +func (kr *kubeRegistry) Unregister(collector Collector) bool { + return kr.PromRegistry.Unregister(collector) +} + +// Gather calls the Collect method of the registered Collectors and then +// gathers the collected metrics into a lexicographically sorted slice +// of uniquely named MetricFamily protobufs. Gather ensures that the +// returned slice is valid and self-consistent so that it can be used +// for valid exposition. As an exception to the strict consistency +// requirements described for metric.Desc, Gather will tolerate +// different sets of label names for metrics of the same metric family. +func (kr *kubeRegistry) Gather() ([]*dto.MetricFamily, error) { + return kr.PromRegistry.Gather() +} + +// trackHiddenCollector stores all hidden collectors. +func (kr *kubeRegistry) trackHiddenCollector(c Registerable) { + kr.hiddenCollectorsLock.Lock() + defer kr.hiddenCollectorsLock.Unlock() + + kr.hiddenCollectors[c.FQName()] = c + hiddenMetricsTotal.Inc() +} + +// trackStableCollectors stores all custom collectors. +func (kr *kubeRegistry) trackStableCollectors(cs ...StableCollector) { + kr.stableCollectorsLock.Lock() + defer kr.stableCollectorsLock.Unlock() + + kr.stableCollectors = append(kr.stableCollectors, cs...) +} + +// enableHiddenCollectors will re-register all of the hidden collectors. +func (kr *kubeRegistry) enableHiddenCollectors() { + if len(kr.hiddenCollectors) == 0 { + return + } + + kr.hiddenCollectorsLock.Lock() + cs := make([]Registerable, 0, len(kr.hiddenCollectors)) + + for _, c := range kr.hiddenCollectors { + c.ClearState() + cs = append(cs, c) + } + + kr.hiddenCollectors = make(map[string]Registerable) + kr.hiddenCollectorsLock.Unlock() + kr.MustRegister(cs...) 
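+	// Note: hiddenCollectorsLock is released above before MustRegister runs,
+	// because MustRegister may call trackHiddenCollector (for a collector
+	// that turns out to still be hidden), which takes the same lock.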
+}
+
+// enableHiddenStableCollectors re-registers the stable collectors that contain one or more hidden metrics.
+// Since a metric cannot be registered twice, each collector has to be unregistered first and then registered again.
+func (kr *kubeRegistry) enableHiddenStableCollectors() {
+	if len(kr.stableCollectors) == 0 {
+		return
+	}
+
+	kr.stableCollectorsLock.Lock()
+
+	cs := make([]StableCollector, 0, len(kr.stableCollectors))
+	for _, c := range kr.stableCollectors {
+		if len(c.HiddenMetrics()) > 0 {
+			kr.Unregister(c) // unregistering must happen before clearing state, otherwise no metrics would be unregistered
+			c.ClearState()
+			cs = append(cs, c)
+		}
+	}
+
+	kr.stableCollectors = nil
+	kr.stableCollectorsLock.Unlock()
+	kr.CustomMustRegister(cs...)
+}
+
+// Reset invokes Reset on all metrics that are resettable.
+func (kr *kubeRegistry) Reset() {
+	kr.resetLock.RLock()
+	defer kr.resetLock.RUnlock()
+	for _, r := range kr.resettables {
+		r.Reset()
+	}
+}
+
+// BuildVersion is a helper function that can be easily mocked.
+var BuildVersion = version.Get
+
+func newKubeRegistry(v apimachineryversion.Info) *kubeRegistry {
+	r := &kubeRegistry{
+		PromRegistry:     prometheus.NewRegistry(),
+		version:          parseVersion(v),
+		hiddenCollectors: make(map[string]Registerable),
+		resettables:      make([]resettable, 0),
+	}
+
+	registriesLock.Lock()
+	defer registriesLock.Unlock()
+	registries = append(registries, r)
+
+	return r
+}
+
+// NewKubeRegistry creates a new vanilla Registry
+func NewKubeRegistry() KubeRegistry {
+	r := newKubeRegistry(BuildVersion())
+	return r
+}
+
+// RegisterMetaMetrics registers metrics about the number of registered metrics.
+func (r *kubeRegistry) RegisterMetaMetrics() {
+	r.MustRegister(registeredMetrics)
+	r.MustRegister(disabledMetricsTotal)
+	r.MustRegister(hiddenMetricsTotal)
+}
diff --git a/vendor/k8s.io/component-base/metrics/summary.go b/vendor/k8s.io/component-base/metrics/summary.go
new file mode 100644
index 000000000..d40421645
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/summary.go
@@ -0,0 +1,226 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"context"
+
+	"github.com/blang/semver/v4"
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+	DefAgeBuckets = prometheus.DefAgeBuckets
+	DefBufCap     = prometheus.DefBufCap
+	DefMaxAge     = prometheus.DefMaxAge
+)
+
+// Summary is our internal representation for our wrapping struct around prometheus
+// summaries. Summary implements both kubeCollector and ObserverMetric
+//
+// DEPRECATED: as per the metrics overhaul KEP
+type Summary struct {
+	ObserverMetric
+	*SummaryOpts
+	lazyMetric
+	selfCollector
+}
+
+// NewSummary returns an object which is Summary-like. However, nothing
+// will be measured until the summary is registered somewhere.
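+//
+// A minimal illustrative sketch (the metric name is hypothetical and
+// registration is elided):
+//
+//	s := NewSummary(&SummaryOpts{
+//		Namespace: "demo",
+//		Name:      "request_latency_seconds",
+//		Help:      "Request latency distribution",
+//	})
+//	// after s has been registered with a KubeRegistry:
+//	s.Observe(0.42)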
+//
+// DEPRECATED: as per the metrics overhaul KEP
+func NewSummary(opts *SummaryOpts) *Summary {
+	opts.StabilityLevel.setDefaults()
+
+	s := &Summary{
+		SummaryOpts: opts,
+		lazyMetric:  lazyMetric{stabilityLevel: opts.StabilityLevel},
+	}
+	s.setPrometheusSummary(noopMetric{})
+	s.lazyInit(s, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name))
+	return s
+}
+
+// setPrometheusSummary sets the underlying prometheus Summary object, i.e. the thing that does the measurement.
+func (s *Summary) setPrometheusSummary(summary prometheus.Summary) {
+	s.ObserverMetric = summary
+	s.initSelfCollection(summary)
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (s *Summary) DeprecatedVersion() *semver.Version {
+	return parseSemver(s.SummaryOpts.DeprecatedVersion)
+}
+
+// initializeMetric invokes the actual prometheus.Summary object instantiation
+// and stores a reference to it
+func (s *Summary) initializeMetric() {
+	s.SummaryOpts.annotateStabilityLevel()
+	// this actually creates the underlying prometheus summary.
+	s.setPrometheusSummary(prometheus.NewSummary(s.SummaryOpts.toPromSummaryOpts()))
+}
+
+// initializeDeprecatedMetric invokes the actual prometheus.Summary object instantiation
+// but modifies the Help description prior to object instantiation.
+func (s *Summary) initializeDeprecatedMetric() {
+	s.SummaryOpts.markDeprecated()
+	s.initializeMetric()
+}
+
+// WithContext allows the normal Summary metric to pass in context. The context is currently a no-op.
+func (s *Summary) WithContext(ctx context.Context) ObserverMetric {
+	return s.ObserverMetric
+}
+
+// SummaryVec is the internal representation of our wrapping struct around prometheus
+// summaryVecs.
+//
+// DEPRECATED: as per the metrics overhaul KEP
+type SummaryVec struct {
+	*prometheus.SummaryVec
+	*SummaryOpts
+	lazyMetric
+	originalLabels []string
+}
+
+// NewSummaryVec returns an object which satisfies kubeCollector and wraps the
+// prometheus.SummaryVec object. However, the object returned will not measure
+// anything unless the collector is first registered, since the metric is lazily
+// instantiated, and only members extracted after registration will actually
+// measure anything.
+//
+// DEPRECATED: as per the metrics overhaul KEP
+func NewSummaryVec(opts *SummaryOpts, labels []string) *SummaryVec {
+	opts.StabilityLevel.setDefaults()
+
+	fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)
+	allowListLock.RLock()
+	if allowList, ok := labelValueAllowLists[fqName]; ok {
+		opts.LabelValueAllowLists = allowList
+	}
+	allowListLock.RUnlock()
+
+	v := &SummaryVec{
+		SummaryOpts:    opts,
+		originalLabels: labels,
+		lazyMetric:     lazyMetric{stabilityLevel: opts.StabilityLevel},
+	}
+	v.lazyInit(v, fqName)
+	return v
+}
+
+// DeprecatedVersion returns a pointer to the Version or nil
+func (v *SummaryVec) DeprecatedVersion() *semver.Version {
+	return parseSemver(v.SummaryOpts.DeprecatedVersion)
+}
+
+func (v *SummaryVec) initializeMetric() {
+	v.SummaryOpts.annotateStabilityLevel()
+	v.SummaryVec = prometheus.NewSummaryVec(v.SummaryOpts.toPromSummaryOpts(), v.originalLabels)
+}
+
+func (v *SummaryVec) initializeDeprecatedMetric() {
+	v.SummaryOpts.markDeprecated()
+	v.initializeMetric()
+}
+
+// Default Prometheus Vec behavior is that member extraction results in creation of a new element
+// if one with the unique label values is not found in the underlying stored metricMap.
+// This means that if this function is called but the underlying metric is not registered +// (which means it will never be exposed externally nor consumed), the metric will exist in memory +// for perpetuity (i.e. throughout application lifecycle). +// +// For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/histogram.go#L460-L470 +// +// In contrast, the Vec behavior in this package is that member extraction before registration +// returns a permanent noop object. + +// WithLabelValues returns the ObserverMetric for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new ObserverMetric is created IFF the summaryVec +// has been registered to a metrics registry. +func (v *SummaryVec) WithLabelValues(lvs ...string) ObserverMetric { + if !v.IsCreated() { + return noop + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs) + } + return v.SummaryVec.WithLabelValues(lvs...) +} + +// With returns the ObserverMetric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new ObserverMetric is created IFF the summaryVec has +// been registered to a metrics registry. +func (v *SummaryVec) With(labels map[string]string) ObserverMetric { + if !v.IsCreated() { + return noop + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainLabelMap(labels) + } + return v.SummaryVec.With(labels) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. However, such inconsistent Labels +// can never match an actual metric, so the method will always return false in +// that case. +func (v *SummaryVec) Delete(labels map[string]string) bool { + if !v.IsCreated() { + return false // since we haven't created the metric, we haven't deleted a metric with the passed in values + } + return v.SummaryVec.Delete(labels) +} + +// Reset deletes all metrics in this vector. +func (v *SummaryVec) Reset() { + if !v.IsCreated() { + return + } + + v.SummaryVec.Reset() +} + +// WithContext returns wrapped SummaryVec with context +func (v *SummaryVec) WithContext(ctx context.Context) *SummaryVecWithContext { + return &SummaryVecWithContext{ + ctx: ctx, + SummaryVec: v, + } +} + +// SummaryVecWithContext is the wrapper of SummaryVec with context. +type SummaryVecWithContext struct { + *SummaryVec + ctx context.Context +} + +// WithLabelValues is the wrapper of SummaryVec.WithLabelValues. +func (vc *SummaryVecWithContext) WithLabelValues(lvs ...string) ObserverMetric { + return vc.SummaryVec.WithLabelValues(lvs...) +} + +// With is the wrapper of SummaryVec.With. +func (vc *SummaryVecWithContext) With(labels map[string]string) ObserverMetric { + return vc.SummaryVec.With(labels) +} diff --git a/vendor/k8s.io/component-base/metrics/timing_histogram.go b/vendor/k8s.io/component-base/metrics/timing_histogram.go new file mode 100644 index 000000000..a0f0b253c --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/timing_histogram.go @@ -0,0 +1,270 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "context" + "time" + + "github.com/blang/semver/v4" + promext "k8s.io/component-base/metrics/prometheusextension" +) + +// PrometheusTimingHistogram is the abstraction of the underlying histogram +// that we want to promote from the wrapper. +type PrometheusTimingHistogram interface { + GaugeMetric +} + +// TimingHistogram is our internal representation for our wrapping struct around +// timing histograms. It implements both kubeCollector and GaugeMetric +type TimingHistogram struct { + PrometheusTimingHistogram + *TimingHistogramOpts + nowFunc func() time.Time + lazyMetric + selfCollector +} + +var _ GaugeMetric = &TimingHistogram{} +var _ Registerable = &TimingHistogram{} +var _ kubeCollector = &TimingHistogram{} + +// NewTimingHistogram returns an object which is TimingHistogram-like. However, nothing +// will be measured until the histogram is registered somewhere. +func NewTimingHistogram(opts *TimingHistogramOpts) *TimingHistogram { + return NewTestableTimingHistogram(time.Now, opts) +} + +// NewTestableTimingHistogram adds injection of the clock +func NewTestableTimingHistogram(nowFunc func() time.Time, opts *TimingHistogramOpts) *TimingHistogram { + opts.StabilityLevel.setDefaults() + + h := &TimingHistogram{ + TimingHistogramOpts: opts, + nowFunc: nowFunc, + lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel}, + } + h.setPrometheusHistogram(noopMetric{}) + h.lazyInit(h, BuildFQName(opts.Namespace, opts.Subsystem, opts.Name)) + return h +} + +// setPrometheusHistogram sets the underlying KubeGauge object, i.e. the thing that does the measurement. +func (h *TimingHistogram) setPrometheusHistogram(histogram promext.TimingHistogram) { + h.PrometheusTimingHistogram = histogram + h.initSelfCollection(histogram) +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (h *TimingHistogram) DeprecatedVersion() *semver.Version { + return parseSemver(h.TimingHistogramOpts.DeprecatedVersion) +} + +// initializeMetric invokes the actual prometheus.Histogram object instantiation +// and stores a reference to it +func (h *TimingHistogram) initializeMetric() { + h.TimingHistogramOpts.annotateStabilityLevel() + // this actually creates the underlying prometheus gauge. + histogram, err := promext.NewTestableTimingHistogram(h.nowFunc, h.TimingHistogramOpts.toPromHistogramOpts()) + if err != nil { + panic(err) // handle as for regular histograms + } + h.setPrometheusHistogram(histogram) +} + +// initializeDeprecatedMetric invokes the actual prometheus.Histogram object instantiation +// but modifies the Help description prior to object instantiation. +func (h *TimingHistogram) initializeDeprecatedMetric() { + h.TimingHistogramOpts.markDeprecated() + h.initializeMetric() +} + +// WithContext allows the normal TimingHistogram metric to pass in context. The context is no-op now. +func (h *TimingHistogram) WithContext(ctx context.Context) GaugeMetric { + return h.PrometheusTimingHistogram +} + +// TimingHistogramVec is the internal representation of our wrapping struct around prometheus +// TimingHistogramVecs. 
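+//
+// As an illustration only (the metric and label names are hypothetical,
+// and TimingHistogramOpts is assumed to carry the usual Namespace, Name,
+// and Help fields):
+//
+//	v := NewTimingHistogramVec(&TimingHistogramOpts{
+//		Namespace: "demo",
+//		Name:      "inflight_requests",
+//		Help:      "Time-weighted number of requests in flight",
+//	}, []string{"verb"})
+//	// Once v is registered, its members behave like gauges:
+//	v.WithLabelValues("GET").Inc()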
+type TimingHistogramVec struct { + *promext.TimingHistogramVec + *TimingHistogramOpts + nowFunc func() time.Time + lazyMetric + originalLabels []string +} + +var _ GaugeVecMetric = &TimingHistogramVec{} +var _ Registerable = &TimingHistogramVec{} +var _ kubeCollector = &TimingHistogramVec{} + +// NewTimingHistogramVec returns an object which satisfies the kubeCollector, Registerable, and GaugeVecMetric interfaces +// and wraps an underlying promext.TimingHistogramVec object. Note well the way that +// behavior depends on registration and whether this is hidden. +func NewTimingHistogramVec(opts *TimingHistogramOpts, labels []string) *TimingHistogramVec { + return NewTestableTimingHistogramVec(time.Now, opts, labels) +} + +// NewTestableTimingHistogramVec adds injection of the clock. +func NewTestableTimingHistogramVec(nowFunc func() time.Time, opts *TimingHistogramOpts, labels []string) *TimingHistogramVec { + opts.StabilityLevel.setDefaults() + + fqName := BuildFQName(opts.Namespace, opts.Subsystem, opts.Name) + allowListLock.RLock() + if allowList, ok := labelValueAllowLists[fqName]; ok { + opts.LabelValueAllowLists = allowList + } + allowListLock.RUnlock() + + v := &TimingHistogramVec{ + TimingHistogramVec: noopTimingHistogramVec, + TimingHistogramOpts: opts, + nowFunc: nowFunc, + originalLabels: labels, + lazyMetric: lazyMetric{stabilityLevel: opts.StabilityLevel}, + } + v.lazyInit(v, fqName) + return v +} + +// DeprecatedVersion returns a pointer to the Version or nil +func (v *TimingHistogramVec) DeprecatedVersion() *semver.Version { + return parseSemver(v.TimingHistogramOpts.DeprecatedVersion) +} + +func (v *TimingHistogramVec) initializeMetric() { + v.TimingHistogramOpts.annotateStabilityLevel() + v.TimingHistogramVec = promext.NewTestableTimingHistogramVec(v.nowFunc, v.TimingHistogramOpts.toPromHistogramOpts(), v.originalLabels...) +} + +func (v *TimingHistogramVec) initializeDeprecatedMetric() { + v.TimingHistogramOpts.markDeprecated() + v.initializeMetric() +} + +// WithLabelValuesChecked, if called before this vector has been registered in +// at least one registry, will return a noop gauge and +// an error that passes ErrIsNotRegistered. +// If called on a hidden vector, +// will return a noop gauge and a nil error. +// If called with a syntactic problem in the labels, will +// return a noop gauge and an error about the labels. +// If none of the above apply, this method will return +// the appropriate vector member and a nil error. +func (v *TimingHistogramVec) WithLabelValuesChecked(lvs ...string) (GaugeMetric, error) { + if !v.IsCreated() { + if v.IsHidden() { + return noop, nil + } + return noop, errNotRegistered + } + if v.LabelValueAllowLists != nil { + v.LabelValueAllowLists.ConstrainToAllowedList(v.originalLabels, lvs) + } + ops, err := v.TimingHistogramVec.GetMetricWithLabelValues(lvs...) + if err != nil { + return noop, err + } + return ops.(GaugeMetric), err +} + +// WithLabelValues calls WithLabelValuesChecked +// and handles errors as follows. +// An error that passes ErrIsNotRegistered is ignored +// and the noop gauge is returned; +// all other errors cause a panic. +func (v *TimingHistogramVec) WithLabelValues(lvs ...string) GaugeMetric { + ans, err := v.WithLabelValuesChecked(lvs...) + if err == nil || ErrIsNotRegistered(err) { + return ans + } + panic(err) +} + +// WithChecked, if called before this vector has been registered in +// at least one registry, will return a noop gauge and +// an error that passes ErrIsNotRegistered. 
+// If called on a hidden vector,
+// will return a noop gauge and a nil error.
+// If called with a syntactic problem in the labels, will
+// return a noop gauge and an error about the labels.
+// If none of the above apply, this method will return
+// the appropriate vector member and a nil error.
+func (v *TimingHistogramVec) WithChecked(labels map[string]string) (GaugeMetric, error) {
+	if !v.IsCreated() {
+		if v.IsHidden() {
+			return noop, nil
+		}
+		return noop, errNotRegistered
+	}
+	if v.LabelValueAllowLists != nil {
+		v.LabelValueAllowLists.ConstrainLabelMap(labels)
+	}
+	ops, err := v.TimingHistogramVec.GetMetricWith(labels)
+	if err != nil {
+		// guard the type assertion below: on error, GetMetricWith
+		// returns a nil Metric
+		return noop, err
+	}
+	return ops.(GaugeMetric), err
+}
+
+// With calls WithChecked and handles errors as follows.
+// An error that passes ErrIsNotRegistered is ignored
+// and the noop gauge is returned;
+// all other errors cause a panic.
+func (v *TimingHistogramVec) With(labels map[string]string) GaugeMetric {
+	ans, err := v.WithChecked(labels)
+	if err == nil || ErrIsNotRegistered(err) {
+		return ans
+	}
+	panic(err)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc. However, such inconsistent Labels
+// can never match an actual metric, so the method will always return false in
+// that case.
+func (v *TimingHistogramVec) Delete(labels map[string]string) bool {
+	if !v.IsCreated() {
+		return false // since we haven't created the metric, we haven't deleted a metric with the passed in values
+	}
+	return v.TimingHistogramVec.Delete(labels)
+}
+
+// Reset deletes all metrics in this vector.
+func (v *TimingHistogramVec) Reset() {
+	if !v.IsCreated() {
+		return
+	}
+
+	v.TimingHistogramVec.Reset()
+}
+
+// InterfaceWithContext returns the wrapped TimingHistogramVec with context
+func (v *TimingHistogramVec) InterfaceWithContext(ctx context.Context) GaugeVecMetric {
+	return &TimingHistogramVecWithContext{
+		ctx:                ctx,
+		TimingHistogramVec: v,
+	}
+}
+
+// TimingHistogramVecWithContext is the wrapper of TimingHistogramVec with context.
+// Currently the context is ignored.
+type TimingHistogramVecWithContext struct {
+	*TimingHistogramVec
+	ctx context.Context
+}
diff --git a/vendor/k8s.io/component-base/metrics/value.go b/vendor/k8s.io/component-base/metrics/value.go
new file mode 100644
index 000000000..4a405048c
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/value.go
@@ -0,0 +1,70 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import (
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// ValueType is an enumeration of metric types that represent a simple value.
+type ValueType int
+
+// Possible values for the ValueType enum.
+const (
+	_ ValueType = iota
+	CounterValue
+	GaugeValue
+	UntypedValue
+)
+
+func (vt *ValueType) toPromValueType() prometheus.ValueType {
+	return prometheus.ValueType(*vt)
+}
+
+// NewLazyConstMetric is a helper of prometheus.MustNewConstMetric.
+//
+// Note: If the metric described by the desc is hidden, the metric will not be created.
+func NewLazyConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {
+	if desc.IsHidden() {
+		return nil
+	}
+	return prometheus.MustNewConstMetric(desc.toPrometheusDesc(), valueType.toPromValueType(), value, labelValues...)
+}
+
+// NewConstMetric is a helper of prometheus.NewConstMetric.
+//
+// Note: If the metric described by the desc is hidden, the metric will not be created.
+func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
+	if desc.IsHidden() {
+		return nil, nil
+	}
+	return prometheus.NewConstMetric(desc.toPrometheusDesc(), valueType.toPromValueType(), value, labelValues...)
+}
+
+// NewLazyMetricWithTimestamp is a helper of prometheus.NewMetricWithTimestamp.
+//
+// Warning: the Metric 'm' must be the one created by NewLazyConstMetric(),
+// otherwise, no stability guarantees would be offered.
+func NewLazyMetricWithTimestamp(t time.Time, m Metric) Metric {
+	if m == nil {
+		return nil
+	}
+
+	return prometheus.NewMetricWithTimestamp(t, m)
+}
diff --git a/vendor/k8s.io/component-base/metrics/version.go b/vendor/k8s.io/component-base/metrics/version.go
new file mode 100644
index 000000000..f963e205e
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/version.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package metrics
+
+import "k8s.io/component-base/version"
+
+var (
+	buildInfo = NewGaugeVec(
+		&GaugeOpts{
+			Name:           "kubernetes_build_info",
+			Help:           "A metric with a constant '1' value labeled by major, minor, git version, git commit, git tree state, build date, Go version, and compiler from which Kubernetes was built, and platform on which it is running.",
+			StabilityLevel: ALPHA,
+		},
+		[]string{"major", "minor", "git_version", "git_commit", "git_tree_state", "build_date", "go_version", "compiler", "platform"},
+	)
+)
+
+// RegisterBuildInfo registers the build and version info in a metadata metric in prometheus
+func RegisterBuildInfo(r KubeRegistry) {
+	info := version.Get()
+	r.MustRegister(buildInfo)
+	buildInfo.WithLabelValues(info.Major, info.Minor, info.GitVersion, info.GitCommit, info.GitTreeState, info.BuildDate, info.GoVersion, info.Compiler, info.Platform).Set(1)
+}
diff --git a/vendor/k8s.io/component-base/metrics/version_parser.go b/vendor/k8s.io/component-base/metrics/version_parser.go
new file mode 100644
index 000000000..102e108e2
--- /dev/null
+++ b/vendor/k8s.io/component-base/metrics/version_parser.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "fmt" + "regexp" + + "github.com/blang/semver/v4" + + apimachineryversion "k8s.io/apimachinery/pkg/version" +) + +const ( + versionRegexpString = `^v(\d+\.\d+\.\d+)` +) + +var ( + versionRe = regexp.MustCompile(versionRegexpString) +) + +func parseSemver(s string) *semver.Version { + if s != "" { + sv := semver.MustParse(s) + return &sv + } + return nil +} +func parseVersion(ver apimachineryversion.Info) semver.Version { + matches := versionRe.FindAllStringSubmatch(ver.String(), -1) + + if len(matches) != 1 { + panic(fmt.Sprintf("version string \"%v\" doesn't match expected regular expression: \"%v\"", ver.String(), versionRe.String())) + } + return semver.MustParse(matches[0][1]) +} diff --git a/vendor/k8s.io/component-base/metrics/wrappers.go b/vendor/k8s.io/component-base/metrics/wrappers.go new file mode 100644 index 000000000..679590aad --- /dev/null +++ b/vendor/k8s.io/component-base/metrics/wrappers.go @@ -0,0 +1,167 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + "errors" + + "github.com/prometheus/client_golang/prometheus" + dto "github.com/prometheus/client_model/go" +) + +// This file contains a series of interfaces which we explicitly define for +// integrating with prometheus. We redefine the interfaces explicitly here +// so that we can prevent breakage if methods are ever added to prometheus +// variants of them. + +// Collector defines a subset of prometheus.Collector interface methods +type Collector interface { + Describe(chan<- *prometheus.Desc) + Collect(chan<- prometheus.Metric) +} + +// Metric defines a subset of prometheus.Metric interface methods +type Metric interface { + Desc() *prometheus.Desc + Write(*dto.Metric) error +} + +// CounterMetric is a Metric that represents a single numerical value that only ever +// goes up. That implies that it cannot be used to count items whose number can +// also go down, e.g. the number of currently running goroutines. Those +// "counters" are represented by Gauges. + +// CounterMetric is an interface which defines a subset of the interface provided by prometheus.Counter +type CounterMetric interface { + Inc() + Add(float64) +} + +// CounterVecMetric is an interface which prometheus.CounterVec satisfies. 
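+// For example (illustrative only), given some cv CounterVecMetric:
+//
+//	cv.WithLabelValues("update").Inc()
+//	cv.With(prometheus.Labels{"verb": "update"}).Add(2)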
+type CounterVecMetric interface { + WithLabelValues(...string) CounterMetric + With(prometheus.Labels) CounterMetric +} + +// GaugeMetric is an interface which defines a subset of the interface provided by prometheus.Gauge +type GaugeMetric interface { + Set(float64) + Inc() + Dec() + Add(float64) + Write(out *dto.Metric) error + SetToCurrentTime() +} + +// GaugeVecMetric is a collection of Gauges that differ only in label values. +type GaugeVecMetric interface { + // Default Prometheus Vec behavior is that member extraction results in creation of a new element + // if one with the unique label values is not found in the underlying stored metricMap. + // This means that if this function is called but the underlying metric is not registered + // (which means it will never be exposed externally nor consumed), the metric would exist in memory + // for perpetuity (i.e. throughout application lifecycle). + // + // For reference: https://github.com/prometheus/client_golang/blob/v0.9.2/prometheus/gauge.go#L190-L208 + // + // In contrast, the Vec behavior in this package is that member extraction before registration + // returns a permanent noop object. + + // WithLabelValuesChecked, if called before this vector has been registered in + // at least one registry, will return a noop gauge and + // an error that passes ErrIsNotRegistered. + // If called on a hidden vector, + // will return a noop gauge and a nil error. + // If called with a syntactic problem in the labels, will + // return a noop gauge and an error about the labels. + // If none of the above apply, this method will return + // the appropriate vector member and a nil error. + WithLabelValuesChecked(labelValues ...string) (GaugeMetric, error) + + // WithLabelValues calls WithLabelValuesChecked + // and handles errors as follows. + // An error that passes ErrIsNotRegistered is ignored + // and the noop gauge is returned; + // all other errors cause a panic. + WithLabelValues(labelValues ...string) GaugeMetric + + // WithChecked, if called before this vector has been registered in + // at least one registry, will return a noop gauge and + // an error that passes ErrIsNotRegistered. + // If called on a hidden vector, + // will return a noop gauge and a nil error. + // If called with a syntactic problem in the labels, will + // return a noop gauge and an error about the labels. + // If none of the above apply, this method will return + // the appropriate vector member and a nil error. + WithChecked(labels map[string]string) (GaugeMetric, error) + + // With calls WithChecked and handles errors as follows. + // An error that passes ErrIsNotRegistered is ignored + // and the noop gauge is returned; + // all other errors cause a panic. + With(labels map[string]string) GaugeMetric + + // Delete asserts that the vec should have no member for the given label set. + // The returned bool indicates whether there was a change. + // The return will certainly be `false` if the given label set has the wrong + // set of label names. + Delete(map[string]string) bool + + // Reset removes all the members + Reset() +} + +// ObserverMetric captures individual observations. 
+type ObserverMetric interface { + Observe(float64) +} + +// PromRegistry is an interface which implements a subset of prometheus.Registerer and +// prometheus.Gatherer interfaces +type PromRegistry interface { + Register(prometheus.Collector) error + MustRegister(...prometheus.Collector) + Unregister(prometheus.Collector) bool + Gather() ([]*dto.MetricFamily, error) +} + +// Gatherer is the interface for the part of a registry in charge of gathering +// the collected metrics into a number of MetricFamilies. +type Gatherer interface { + prometheus.Gatherer +} + +// Registerer is the interface for the part of a registry in charge of registering +// the collected metrics. +type Registerer interface { + prometheus.Registerer +} + +// GaugeFunc is a Gauge whose value is determined at collect time by calling a +// provided function. +// +// To create GaugeFunc instances, use NewGaugeFunc. +type GaugeFunc interface { + Metric + Collector +} + +func ErrIsNotRegistered(err error) bool { + return err == errNotRegistered +} + +var errNotRegistered = errors.New("metric vec is not registered yet") diff --git a/vendor/k8s.io/kube-aggregator/LICENSE b/vendor/k8s.io/kube-aggregator/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/doc.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/doc.go new file mode 100644 index 000000000..394bcbc8e --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +groupName=apiregistration.k8s.io + +// Package apiregistration is the internal version of the API. 
+package apiregistration // import "k8s.io/kube-aggregator/pkg/apis/apiregistration" diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/helpers.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/helpers.go new file mode 100644 index 000000000..dfa746008 --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/helpers.go @@ -0,0 +1,128 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package apiregistration + +import ( + "sort" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/version" +) + +// SortedByGroupAndVersion sorts APIServices into their different groups, and then sorts them based on their versions. +// For example, the first element of the first array contains the APIService with the highest version number, in the +// group with the highest priority; while the last element of the last array contains the APIService with the lowest +// version number, in the group with the lowest priority. +func SortedByGroupAndVersion(servers []*APIService) [][]*APIService { + serversByGroupPriorityMinimum := ByGroupPriorityMinimum(servers) + sort.Sort(serversByGroupPriorityMinimum) + + ret := [][]*APIService{} + for _, curr := range serversByGroupPriorityMinimum { + // check to see if we already have an entry for this group + existingIndex := -1 + for j, groupInReturn := range ret { + if groupInReturn[0].Spec.Group == curr.Spec.Group { + existingIndex = j + break + } + } + + if existingIndex >= 0 { + ret[existingIndex] = append(ret[existingIndex], curr) + sort.Sort(ByVersionPriority(ret[existingIndex])) + continue + } + + ret = append(ret, []*APIService{curr}) + } + + return ret +} + +// ByGroupPriorityMinimum sorts with the highest group number first, then by name. +// This is not a simple reverse, because we want the name sorting to be alpha, not +// reverse alpha. +type ByGroupPriorityMinimum []*APIService + +func (s ByGroupPriorityMinimum) Len() int { return len(s) } +func (s ByGroupPriorityMinimum) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s ByGroupPriorityMinimum) Less(i, j int) bool { + if s[i].Spec.GroupPriorityMinimum != s[j].Spec.GroupPriorityMinimum { + return s[i].Spec.GroupPriorityMinimum > s[j].Spec.GroupPriorityMinimum + } + return s[i].Name < s[j].Name +} + +// ByVersionPriority sorts with the highest version number first, then by name. +// This is not a simple reverse, because we want the name sorting to be alpha, not +// reverse alpha. +type ByVersionPriority []*APIService + +func (s ByVersionPriority) Len() int { return len(s) } +func (s ByVersionPriority) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s ByVersionPriority) Less(i, j int) bool { + if s[i].Spec.VersionPriority != s[j].Spec.VersionPriority { + return s[i].Spec.VersionPriority > s[j].Spec.VersionPriority + } + return version.CompareKubeAwareVersionStrings(s[i].Spec.Version, s[j].Spec.Version) > 0 +} + +// NewLocalAvailableAPIServiceCondition returns a condition for an available local APIService. 
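+// An illustrative flow (apiService is a hypothetical *APIService), using
+// the helpers defined below:
+//
+//	cond := NewLocalAvailableAPIServiceCondition()
+//	SetAPIServiceCondition(apiService, cond)
+//	IsAPIServiceConditionTrue(apiService, Available) // now reports true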
+func NewLocalAvailableAPIServiceCondition() APIServiceCondition { + return APIServiceCondition{ + Type: Available, + Status: ConditionTrue, + LastTransitionTime: metav1.Now(), + Reason: "Local", + Message: "Local APIServices are always available", + } +} + +// GetAPIServiceConditionByType gets an *APIServiceCondition by APIServiceConditionType if present +func GetAPIServiceConditionByType(apiService *APIService, conditionType APIServiceConditionType) *APIServiceCondition { + for i := range apiService.Status.Conditions { + if apiService.Status.Conditions[i].Type == conditionType { + return &apiService.Status.Conditions[i] + } + } + return nil +} + +// SetAPIServiceCondition sets the status condition. It either overwrites the existing one or +// creates a new one +func SetAPIServiceCondition(apiService *APIService, newCondition APIServiceCondition) { + existingCondition := GetAPIServiceConditionByType(apiService, newCondition.Type) + if existingCondition == nil { + apiService.Status.Conditions = append(apiService.Status.Conditions, newCondition) + return + } + + if existingCondition.Status != newCondition.Status { + existingCondition.Status = newCondition.Status + existingCondition.LastTransitionTime = newCondition.LastTransitionTime + } + + existingCondition.Reason = newCondition.Reason + existingCondition.Message = newCondition.Message +} + +// IsAPIServiceConditionTrue indicates if the condition is present and strictly true +func IsAPIServiceConditionTrue(apiService *APIService, conditionType APIServiceConditionType) bool { + condition := GetAPIServiceConditionByType(apiService, conditionType) + return condition != nil && condition.Status == ConditionTrue +} diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/register.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/register.go new file mode 100644 index 000000000..7b88df42f --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/register.go @@ -0,0 +1,54 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package apiregistration
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the API group for apiregistration
+const GroupName = "apiregistration.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+	return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns back a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// SchemeBuilder is the scheme builder with scheme init functions to run for this API package
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	// AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme
+	AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&APIService{},
+		&APIServiceList{},
+	)
+	return nil
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/types.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/types.go
new file mode 100644
index 000000000..95b03a1dd
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/types.go
@@ -0,0 +1,146 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apiregistration
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// APIServiceList is a list of APIService objects.
+type APIServiceList struct {
+	metav1.TypeMeta
+	metav1.ListMeta
+
+	Items []APIService
+}
+
+// ServiceReference holds a reference to Service.legacy.k8s.io
+type ServiceReference struct {
+	// Namespace is the namespace of the service
+	Namespace string
+	// Name is the name of the service
+	Name string
+	// If specified, the port on the service that is hosting this API.
+	// Defaults to 443 for backward compatibility.
+	// `port` should be a valid port number (1-65535, inclusive).
+	// +optional
+	Port int32
+}
+
+// APIServiceSpec contains information for locating and communicating with a server.
+// Only https is supported, though you are able to disable certificate verification.
+type APIServiceSpec struct {
+	// Service is a reference to the service for this API server. It must communicate
+	// on port 443.
+	// If the Service is nil, that means the handling for the API groupversion is handled locally on this server.
+	// The call will simply delegate to the normal handler chain to be fulfilled.
+	// +optional
+	Service *ServiceReference
+	// Group is the API group name this server hosts
+	Group string
+	// Version is the API version this server hosts. For example, "v1"
+	Version string
+
+	// InsecureSkipTLSVerify disables TLS certificate verification when communicating with this server.
+	// This is strongly discouraged. You should use the CABundle instead.
+	InsecureSkipTLSVerify bool
+	// CABundle is a PEM encoded CA bundle which will be used to validate an API server's serving certificate.
+	// If unspecified, system trust roots on the apiserver are used.
+	// +listType=atomic
+	// +optional
+	CABundle []byte
+
+	// GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones.
+	// Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority.
+	// The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10).
+	// The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo)
+	// We'd recommend something like: *.k8s.io (except extensions) at 18000 and
+	// PaaSes (OpenShift, Deis) are recommended to be in the 2000s
+	GroupPriorityMinimum int32
+
+	// VersionPriority controls the ordering of this API version inside of its group. Must be greater than zero.
+	// The primary sort is based on VersionPriority, ordered highest to lowest (20 before 10).
+	// Since it's inside of a group, the number can be small, probably in the 10s.
+	// In case of equal version priorities, the version string will be used to compute the order inside a group.
+	// If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered
+	// lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version),
+	// then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first
+	// by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major
+	// version, then minor version. An example sorted list of versions:
+	// v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.
+	VersionPriority int32
+}
+
+// ConditionStatus indicates the status of a condition (true, false, or unknown).
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionTrue" means a resource is in the condition;
+// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+	ConditionTrue    ConditionStatus = "True"
+	ConditionFalse   ConditionStatus = "False"
+	ConditionUnknown ConditionStatus = "Unknown"
+)
+
+// APIServiceConditionType is a valid value for APIServiceCondition.Type
+type APIServiceConditionType string
+
+const (
+	// Available indicates that the service exists and is reachable
+	Available APIServiceConditionType = "Available"
+)
+
+// APIServiceCondition describes conditions for an APIService
+type APIServiceCondition struct {
+	// Type is the type of the condition.
+	Type APIServiceConditionType
+	// Status is the status of the condition.
+	// Can be True, False, Unknown.
+	Status ConditionStatus
+	// Last time the condition transitioned from one status to another.
+	LastTransitionTime metav1.Time
+	// Unique, one-word, CamelCase reason for the condition's last transition.
+	Reason string
+	// Human-readable message indicating details about last transition.
+	Message string
+}
+
+// APIServiceStatus contains derived information about an API server
+type APIServiceStatus struct {
+	// Current service state of apiService.
+	// +listType=map
+	// +listMapKey=type
+	Conditions []APIServiceCondition
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// APIService represents a server for a particular GroupVersion.
+// Name must be "version.group".
+type APIService struct {
+	metav1.TypeMeta
+	metav1.ObjectMeta
+
+	// Spec contains information for locating and communicating with a server
+	Spec APIServiceSpec
+	// Status contains derived information about an API server
+	Status APIServiceStatus
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/defaults.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/defaults.go
new file mode 100644
index 000000000..2ae90d646
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/defaults.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	utilpointer "k8s.io/utils/pointer"
+)
+
+func addDefaultingFuncs(scheme *runtime.Scheme) error {
+	return RegisterDefaults(scheme)
+}
+
+// SetDefaults_ServiceReference sets the default port (443) on an APIService's ServiceReference
+func SetDefaults_ServiceReference(obj *ServiceReference) {
+	if obj.Port == nil {
+		obj.Port = utilpointer.Int32Ptr(443)
+	}
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/doc.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/doc.go
new file mode 100644
index 000000000..b9993f4ca
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/doc.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:protobuf-gen=package
+// +k8s:conversion-gen=k8s.io/kube-aggregator/pkg/apis/apiregistration
+// +k8s:openapi-gen=true
+// +groupName=apiregistration.k8s.io
+// +k8s:defaulter-gen=TypeMeta
+
+// Package v1 contains the API Registration API, which is responsible for
+// registering an API `Group`/`Version` with another Kubernetes-like API server.
+// The `APIService` holds information about the other API server in the
+// `APIServiceSpec` type as well as general `TypeMeta` and `ObjectMeta`. The
+// `APIServiceSpec` type has the main configuration needed to do the
+// aggregation. Any request coming in for the specified `Group`/`Version` will
+// be directed to the service defined by `ServiceReference` (on port 443) after
+// validating the target using the provided `CABundle`, or skipping validation
+// if the development flag `InsecureSkipTLSVerify` is set. `Priority` controls
+// the order of this API group in the overall discovery document.
+// The returned status is a set of conditions for this aggregation. Currently
+// there is only one condition, named "Available"; if true, it means that
+// requests for this API will be redirected to the specified API server.
+package v1 // import "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.pb.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.pb.go
new file mode 100644
index 000000000..a18956b9a
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.pb.go
@@ -0,0 +1,1814 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: k8s.io/kubernetes/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto
+
+package v1
+
+import (
+	fmt "fmt"
+
+	io "io"
+
+	proto "github.com/gogo/protobuf/proto"
+
+	math "math"
+	math_bits "math/bits"
+	reflect "reflect"
+	strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
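[Editorial sketch, not part of the generated file: a hedged illustration of the generated marshallers below. A protobuf round-trip of an APIService could look like the following; `roundTrip` is a hypothetical helper built only on the `Marshal` and `Unmarshal` methods this file defines.]

    // roundTrip encodes an APIService to its protobuf wire form and decodes
    // it back, returning the reconstructed object.
    func roundTrip(in *APIService) (*APIService, error) {
        raw, err := in.Marshal() // generated marshaller, defined below
        if err != nil {
            return nil, err
        }
        out := &APIService{}
        if err := out.Unmarshal(raw); err != nil { // generated unmarshaller
            return nil, err
        }
        return out, nil
    }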
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *APIService) Reset() { *m = APIService{} } +func (*APIService) ProtoMessage() {} +func (*APIService) Descriptor() ([]byte, []int) { + return fileDescriptor_0d3d63d6a1e30d64, []int{0} +} +func (m *APIService) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIService) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIService) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIService.Merge(m, src) +} +func (m *APIService) XXX_Size() int { + return m.Size() +} +func (m *APIService) XXX_DiscardUnknown() { + xxx_messageInfo_APIService.DiscardUnknown(m) +} + +var xxx_messageInfo_APIService proto.InternalMessageInfo + +func (m *APIServiceCondition) Reset() { *m = APIServiceCondition{} } +func (*APIServiceCondition) ProtoMessage() {} +func (*APIServiceCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_0d3d63d6a1e30d64, []int{1} +} +func (m *APIServiceCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIServiceCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIServiceCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIServiceCondition.Merge(m, src) +} +func (m *APIServiceCondition) XXX_Size() int { + return m.Size() +} +func (m *APIServiceCondition) XXX_DiscardUnknown() { + xxx_messageInfo_APIServiceCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_APIServiceCondition proto.InternalMessageInfo + +func (m *APIServiceList) Reset() { *m = APIServiceList{} } +func (*APIServiceList) ProtoMessage() {} +func (*APIServiceList) Descriptor() ([]byte, []int) { + return fileDescriptor_0d3d63d6a1e30d64, []int{2} +} +func (m *APIServiceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIServiceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIServiceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIServiceList.Merge(m, src) +} +func (m *APIServiceList) XXX_Size() int { + return m.Size() +} +func (m *APIServiceList) XXX_DiscardUnknown() { + xxx_messageInfo_APIServiceList.DiscardUnknown(m) +} + +var xxx_messageInfo_APIServiceList proto.InternalMessageInfo + +func (m *APIServiceSpec) Reset() { *m = APIServiceSpec{} } +func (*APIServiceSpec) ProtoMessage() {} +func (*APIServiceSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_0d3d63d6a1e30d64, []int{3} +} +func (m *APIServiceSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIServiceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIServiceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIServiceSpec.Merge(m, src) +} +func (m *APIServiceSpec) XXX_Size() int { + return m.Size() +} +func (m *APIServiceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_APIServiceSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_APIServiceSpec proto.InternalMessageInfo + +func (m *APIServiceStatus) Reset() { *m = APIServiceStatus{} } +func (*APIServiceStatus) 
ProtoMessage() {} +func (*APIServiceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_0d3d63d6a1e30d64, []int{4} +} +func (m *APIServiceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIServiceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIServiceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIServiceStatus.Merge(m, src) +} +func (m *APIServiceStatus) XXX_Size() int { + return m.Size() +} +func (m *APIServiceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_APIServiceStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_APIServiceStatus proto.InternalMessageInfo + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { + return fileDescriptor_0d3d63d6a1e30d64, []int{5} +} +func (m *ServiceReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceReference.Merge(m, src) +} +func (m *ServiceReference) XXX_Size() int { + return m.Size() +} +func (m *ServiceReference) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceReference proto.InternalMessageInfo + +func init() { + proto.RegisterType((*APIService)(nil), "k8s.io.kube_aggregator.pkg.apis.apiregistration.v1.APIService") + proto.RegisterType((*APIServiceCondition)(nil), "k8s.io.kube_aggregator.pkg.apis.apiregistration.v1.APIServiceCondition") + proto.RegisterType((*APIServiceList)(nil), "k8s.io.kube_aggregator.pkg.apis.apiregistration.v1.APIServiceList") + proto.RegisterType((*APIServiceSpec)(nil), "k8s.io.kube_aggregator.pkg.apis.apiregistration.v1.APIServiceSpec") + proto.RegisterType((*APIServiceStatus)(nil), "k8s.io.kube_aggregator.pkg.apis.apiregistration.v1.APIServiceStatus") + proto.RegisterType((*ServiceReference)(nil), "k8s.io.kube_aggregator.pkg.apis.apiregistration.v1.ServiceReference") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto", fileDescriptor_0d3d63d6a1e30d64) +} + +var fileDescriptor_0d3d63d6a1e30d64 = []byte{ + // 838 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xdf, 0x6b, 0x2b, 0x45, + 0x14, 0xce, 0xb6, 0x49, 0x9b, 0x4e, 0xeb, 0x6d, 0x1d, 0xef, 0xe5, 0x2e, 0xe5, 0xba, 0xad, 0x11, + 0xb4, 0x0a, 0x77, 0xd7, 0x16, 0x11, 0x45, 0x10, 0xba, 0x57, 0x28, 0x85, 0x56, 0xcb, 0xa4, 0x14, + 0x51, 0x41, 0xa7, 0x9b, 0xd3, 0xed, 0x98, 0xee, 0xce, 0x32, 0x33, 0x1b, 0x08, 0xbe, 0x08, 0xfe, + 0x01, 0xfa, 0x37, 0xf9, 0xd4, 0xc7, 0x0b, 0xbe, 0xf4, 0x29, 0x98, 0xf8, 0x5f, 0xdc, 0x27, 0x99, + 0xd9, 0xd9, 0xdd, 0x34, 0x8d, 0x78, 0x6b, 0x5f, 0x42, 0xce, 0x8f, 0xef, 0xfb, 0xce, 0x9c, 0xf9, + 0x32, 0x41, 0xdf, 0xf5, 0x3f, 0x95, 0x3e, 0xe3, 0x41, 0x3f, 0x3f, 0x07, 0x91, 0x82, 0x02, 0x19, + 0x0c, 0x20, 0xed, 0x71, 0x11, 0x4c, 0x15, 0x9e, 0xd3, 0x38, 0x16, 0x10, 0x53, 0xc5, 0x45, 0x90, + 0xf5, 0xe3, 0x80, 0x66, 0x4c, 0xea, 0x0f, 0x01, 0x31, 0x93, 0x4a, 0x50, 0xc5, 0x78, 0x1a, 0x0c, + 0x76, 0x83, 0x18, 0x52, 0x10, 0x54, 0x41, 0xcf, 0xcf, 
0x04, 0x57, 0x1c, 0xef, 0x15, 0x1c, 0xbe, + 0xe6, 0xf8, 0xa1, 0xe6, 0xf0, 0xb3, 0x7e, 0xec, 0x6b, 0x0e, 0x7f, 0x86, 0xc3, 0x1f, 0xec, 0x6e, + 0x3e, 0x8f, 0x99, 0xba, 0xcc, 0xcf, 0xfd, 0x88, 0x27, 0x41, 0xcc, 0x63, 0x1e, 0x18, 0xaa, 0xf3, + 0xfc, 0xc2, 0x44, 0x26, 0x30, 0xdf, 0x0a, 0x89, 0xcd, 0x8f, 0xed, 0x98, 0x34, 0x63, 0x09, 0x8d, + 0x2e, 0x59, 0x0a, 0x62, 0x58, 0xcf, 0x98, 0x80, 0xa2, 0x73, 0x06, 0xdb, 0x0c, 0xfe, 0x0d, 0x25, + 0xf2, 0x54, 0xb1, 0x04, 0xee, 0x00, 0x3e, 0xf9, 0x2f, 0x80, 0x8c, 0x2e, 0x21, 0xa1, 0xb3, 0xb8, + 0xce, 0x1f, 0x0b, 0x08, 0xed, 0x9f, 0x1c, 0x76, 0x41, 0x0c, 0x58, 0x04, 0xf8, 0x47, 0xd4, 0xd6, + 0x23, 0xf5, 0xa8, 0xa2, 0xae, 0xb3, 0xed, 0xec, 0xac, 0xee, 0x7d, 0xe4, 0xdb, 0x1d, 0x4d, 0x33, + 0xd7, 0x0b, 0xd2, 0xdd, 0xfe, 0x60, 0xd7, 0xff, 0xfa, 0xfc, 0x27, 0x88, 0xd4, 0x31, 0x28, 0x1a, + 0xe2, 0xeb, 0xd1, 0x56, 0x63, 0x32, 0xda, 0x42, 0x75, 0x8e, 0x54, 0xac, 0xb8, 0x87, 0x9a, 0x32, + 0x83, 0xc8, 0x5d, 0x30, 0xec, 0xa1, 0x7f, 0xff, 0x1b, 0xf0, 0xeb, 0x79, 0xbb, 0x19, 0x44, 0xe1, + 0x9a, 0xd5, 0x6b, 0xea, 0x88, 0x18, 0x76, 0x7c, 0x85, 0x96, 0xa4, 0xa2, 0x2a, 0x97, 0xee, 0xa2, + 0xd1, 0xf9, 0xf2, 0x81, 0x3a, 0x86, 0x2b, 0x7c, 0x64, 0x95, 0x96, 0x8a, 0x98, 0x58, 0x8d, 0xce, + 0xcd, 0x02, 0x7a, 0xab, 0x6e, 0x7e, 0xc1, 0xd3, 0x1e, 0xd3, 0x1c, 0xf8, 0x73, 0xd4, 0x54, 0xc3, + 0x0c, 0xcc, 0x26, 0x57, 0xc2, 0xf7, 0xcb, 0x39, 0x4f, 0x87, 0x19, 0xbc, 0x1a, 0x6d, 0x3d, 0x9d, + 0x03, 0xd1, 0x25, 0x62, 0x40, 0xf8, 0xb3, 0xea, 0x08, 0x0b, 0x06, 0xfe, 0xce, 0x6d, 0xf1, 0x57, + 0xa3, 0xad, 0xf5, 0x0a, 0x76, 0x7b, 0x1e, 0x3c, 0x40, 0xf8, 0x8a, 0x4a, 0x75, 0x2a, 0x68, 0x2a, + 0x0b, 0x5a, 0x96, 0x80, 0xdd, 0xc4, 0x87, 0xaf, 0x77, 0x9f, 0x1a, 0x11, 0x6e, 0x5a, 0x49, 0x7c, + 0x74, 0x87, 0x8d, 0xcc, 0x51, 0xc0, 0xef, 0xa1, 0x25, 0x01, 0x54, 0xf2, 0xd4, 0x6d, 0x9a, 0x91, + 0xab, 0x7d, 0x11, 0x93, 0x25, 0xb6, 0x8a, 0x3f, 0x40, 0xcb, 0x09, 0x48, 0x49, 0x63, 0x70, 0x5b, + 0xa6, 0x71, 0xdd, 0x36, 0x2e, 0x1f, 0x17, 0x69, 0x52, 0xd6, 0x3b, 0x7f, 0x3a, 0xe8, 0x51, 0xbd, + 0xa7, 0x23, 0x26, 0x15, 0xfe, 0xfe, 0x8e, 0x47, 0xfd, 0xd7, 0x3b, 0x93, 0x46, 0x1b, 0x87, 0x6e, + 0x58, 0xb9, 0x76, 0x99, 0x99, 0xf2, 0x67, 0x84, 0x5a, 0x4c, 0x41, 0xa2, 0xb7, 0xbe, 0xb8, 0xb3, + 0xba, 0xf7, 0xc5, 0xc3, 0x8c, 0x13, 0xbe, 0x61, 0xa5, 0x5a, 0x87, 0x9a, 0x94, 0x14, 0xdc, 0x9d, + 0xf1, 0xe2, 0xf4, 0xa9, 0xb4, 0x6f, 0x71, 0x1f, 0x2d, 0xcb, 0x22, 0xb4, 0x87, 0xfa, 0x5f, 0x96, + 0xb5, 0x8c, 0x04, 0x2e, 0x40, 0x40, 0x1a, 0x41, 0xb8, 0xaa, 0xb7, 0x5a, 0x66, 0x4b, 0x05, 0xfc, + 0x2e, 0x6a, 0xc5, 0x82, 0xe7, 0x99, 0xb5, 0x56, 0x35, 0xe4, 0x81, 0x4e, 0x92, 0xa2, 0xa6, 0x6f, + 0x69, 0x00, 0x42, 0x32, 0x9e, 0x1a, 0xeb, 0x4c, 0xdd, 0xd2, 0x59, 0x91, 0x26, 0x65, 0x1d, 0x77, + 0xd1, 0x13, 0x96, 0x4a, 0x88, 0x72, 0x01, 0xdd, 0x3e, 0xcb, 0x4e, 0x8f, 0xba, 0x67, 0x20, 0xd8, + 0xc5, 0xd0, 0xf8, 0xa0, 0x1d, 0xbe, 0x6d, 0x81, 0x4f, 0x0e, 0xe7, 0x35, 0x91, 0xf9, 0x58, 0xbc, + 0x83, 0xda, 0x11, 0x0d, 0xf3, 0xb4, 0x77, 0x55, 0xd8, 0x64, 0x2d, 0x5c, 0xd3, 0x77, 0xf6, 0x62, + 0xbf, 0xc8, 0x91, 0xaa, 0x8a, 0x4f, 0xd0, 0x63, 0x33, 0xf2, 0x89, 0x60, 0x5c, 0x30, 0x35, 0x3c, + 0x66, 0x29, 0x4b, 0xf2, 0xc4, 0x5d, 0xde, 0x76, 0x76, 0x5a, 0xe1, 0x33, 0xab, 0xfe, 0xf8, 0x60, + 0x4e, 0x0f, 0x99, 0x8b, 0xc4, 0xfb, 0x68, 0xdd, 0x9e, 0xad, 0xac, 0xb8, 0x6d, 0x43, 0xf6, 0xd4, + 0x92, 0xad, 0x9f, 0xdd, 0x2e, 0x93, 0xd9, 0xfe, 0xce, 0x6f, 0x0e, 0xda, 0x98, 0x7d, 0x41, 0xf0, + 0xcf, 0x08, 0x45, 0xe5, 0x8f, 0x56, 0xba, 0x8e, 0xb1, 0xd8, 0xc1, 0xc3, 0x2c, 0x56, 0x3d, 0x02, + 0xf5, 0xc3, 0x5b, 0xa5, 0x24, 0x99, 0x92, 0xeb, 0xfc, 0xea, 0xa0, 0x8d, 0x59, 
0x83, 0xe0, 0x00, + 0xad, 0xa4, 0x34, 0x01, 0x99, 0xd1, 0xa8, 0x7c, 0xa8, 0xde, 0xb4, 0x3c, 0x2b, 0x5f, 0x95, 0x05, + 0x52, 0xf7, 0xe0, 0x6d, 0xd4, 0xd4, 0x81, 0xb5, 0x4e, 0xf5, 0xf8, 0xea, 0x5e, 0x62, 0x2a, 0xf8, + 0x19, 0x6a, 0x66, 0x5c, 0x28, 0xe3, 0x9a, 0x56, 0xd8, 0xd6, 0xd5, 0x13, 0x2e, 0x14, 0x31, 0xd9, + 0xf0, 0x9b, 0xeb, 0xb1, 0xd7, 0x78, 0x39, 0xf6, 0x1a, 0x37, 0x63, 0xaf, 0xf1, 0xcb, 0xc4, 0x73, + 0xae, 0x27, 0x9e, 0xf3, 0x72, 0xe2, 0x39, 0x37, 0x13, 0xcf, 0xf9, 0x6b, 0xe2, 0x39, 0xbf, 0xff, + 0xed, 0x35, 0xbe, 0xdd, 0xbb, 0xff, 0xbf, 0xfb, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x34, 0x09, + 0x9c, 0x10, 0x2b, 0x08, 0x00, 0x00, +} + +func (m *APIService) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIService) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIService) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *APIServiceCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIServiceCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIServiceCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *APIServiceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIServiceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIServiceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *APIServiceSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIServiceSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIServiceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.VersionPriority)) + i-- + dAtA[i] = 0x40 + i = encodeVarintGenerated(dAtA, i, uint64(m.GroupPriorityMinimum)) + i-- + dAtA[i] = 0x38 + if m.CABundle != nil { + i -= len(m.CABundle) + copy(dAtA[i:], m.CABundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i-- + dAtA[i] = 0x2a + } + i-- + if m.InsecureSkipTLSVerify { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x1a + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x12 + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *APIServiceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIServiceStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIServiceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ServiceReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x18 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func 
encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *APIService) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *APIServiceCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *APIServiceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIServiceSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.CABundle != nil { + l = len(m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.GroupPriorityMinimum)) + n += 1 + sovGenerated(uint64(m.VersionPriority)) + return n +} + +func (m *APIServiceStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *APIService) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&APIService{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "APIServiceSpec", "APIServiceSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "APIServiceStatus", "APIServiceStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *APIServiceCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&APIServiceCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return 
s +} +func (this *APIServiceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]APIService{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "APIService", "APIService", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&APIServiceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *APIServiceSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&APIServiceSpec{`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `InsecureSkipTLSVerify:` + fmt.Sprintf("%v", this.InsecureSkipTLSVerify) + `,`, + `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `GroupPriorityMinimum:` + fmt.Sprintf("%v", this.GroupPriorityMinimum) + `,`, + `VersionPriority:` + fmt.Sprintf("%v", this.VersionPriority) + `,`, + `}`, + }, "") + return s +} +func (this *APIServiceStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]APIServiceCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "APIServiceCondition", "APIServiceCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&APIServiceStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *ServiceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceReference{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *APIService) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIService: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIService: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIServiceCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIServiceCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIServiceCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = APIServiceConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 
0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIServiceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIServiceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIServiceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, APIService{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIServiceSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIServiceSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIServiceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Service == nil { + m.Service = &ServiceReference{} + } + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + 
} + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InsecureSkipTLSVerify", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.InsecureSkipTLSVerify = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) 
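+			// Editorial note: the append into a length-zero slice copies the
+			// CABundle bytes out of the wire buffer rather than aliasing dAtA;
+			// the nil check below then normalizes a zero-length field to a
+			// non-nil empty slice.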
+ if m.CABundle == nil { + m.CABundle = []byte{} + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupPriorityMinimum", wireType) + } + m.GroupPriorityMinimum = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GroupPriorityMinimum |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field VersionPriority", wireType) + } + m.VersionPriority = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.VersionPriority |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIServiceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIServiceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIServiceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, APIServiceCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceReference: 
wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, 
ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto
new file mode 100644
index 000000000..c3ff86514
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/generated.proto
@@ -0,0 +1,151 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = "proto2";
+
+package k8s.io.kube_aggregator.pkg.apis.apiregistration.v1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1";
+
+// APIService represents a server for a particular GroupVersion.
+// Name must be "version.group".
+message APIService {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec contains information for locating and communicating with a server
+  optional APIServiceSpec spec = 2;
+
+  // Status contains derived information about an API server
+  optional APIServiceStatus status = 3;
+}
+
+// APIServiceCondition describes the state of an APIService at a particular point
+message APIServiceCondition {
+  // Type is the type of the condition.
+  optional string type = 1;
+
+  // Status is the status of the condition.
+  // Can be True, False, Unknown.
+  optional string status = 2;
+
+  // Last time the condition transitioned from one status to another.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+  // Unique, one-word, CamelCase reason for the condition's last transition.
+  // +optional
+  optional string reason = 4;
+
+  // Human-readable message indicating details about last transition.
+  // +optional
+  optional string message = 5;
+}
+
+// APIServiceList is a list of APIService objects.
+message APIServiceList {
+  // Standard list metadata
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is the list of APIService
+  repeated APIService items = 2;
+}
+
+// APIServiceSpec contains information for locating and communicating with a server.
+// Only https is supported, though you are able to disable certificate verification.
+message APIServiceSpec {
+  // Service is a reference to the service for this API server. It must communicate
+  // on port 443.
+  // If the Service is nil, that means the handling for the API groupversion is handled locally on this server.
+  // The call will simply delegate to the normal handler chain to be fulfilled.
+  // +optional
+  optional ServiceReference service = 1;
+
+  // Group is the API group name this server hosts
+  optional string group = 2;
+
+  // Version is the API version this server hosts. For example, "v1"
+  optional string version = 3;
+
+  // InsecureSkipTLSVerify disables TLS certificate verification when communicating with this server.
+  // This is strongly discouraged. You should use the CABundle instead.
+  optional bool insecureSkipTLSVerify = 4;
+
+  // CABundle is a PEM encoded CA bundle which will be used to validate an API server's serving certificate.
+  // If unspecified, system trust roots on the apiserver are used.
+  // +listType=atomic
+  // +optional
+  optional bytes caBundle = 5;
+
+  // GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones.
+  // Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority.
+  // The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10).
+  // The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo)
+  // We'd recommend something like: *.k8s.io (except extensions) at 18000 and
+  // PaaSes (OpenShift, Deis) are recommended to be in the 2000s
+  optional int32 groupPriorityMinimum = 7;
+
+  // VersionPriority controls the ordering of this API version inside of its group. Must be greater than zero.
+  // The primary sort is based on VersionPriority, ordered highest to lowest (20 before 10).
+  // Since it's inside of a group, the number can be small, probably in the 10s.
+  // In case of equal version priorities, the version string will be used to compute the order inside a group.
+  // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered
+  // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version),
+  // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first
+  // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major
+  // version, then minor version. An example sorted list of versions:
+  // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.
+  optional int32 versionPriority = 8;
+}
+
+// APIServiceStatus contains derived information about an API server
+message APIServiceStatus {
+  // Current service state of apiService.
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=type
+  repeated APIServiceCondition conditions = 1;
+}
+
+// ServiceReference holds a reference to Service.legacy.k8s.io
+message ServiceReference {
+  // Namespace is the namespace of the service
+  optional string namespace = 1;
+
+  // Name is the name of the service
+  optional string name = 2;
+
+  // If specified, the port on the service that is hosting the webhook.
+  // Defaults to 443 for backward compatibility.
+  // `port` should be a valid port number (1-65535, inclusive).
+  // +optional
+  optional int32 port = 3;
+}
+
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/register.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/register.go
new file mode 100644
index 000000000..07e65bf04
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/register.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the API group for apiregistration
+const GroupName = "apiregistration.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// SchemeBuilder is the scheme builder with scheme init functions to run for this API package
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      runtime.SchemeBuilder
+	localSchemeBuilder = &SchemeBuilder
+	// AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme
+	AddToScheme = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+	// We only register manually written functions here. The registration of the
+	// generated functions takes place in the generated files. The separation
+	// makes the code compile even when the generated files are missing.
+	localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs)
+}
+
+// Adds the list of known types to the given scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&APIService{},
+		&APIServiceList{},
+	)
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go
new file mode 100644
index 000000000..9954d7e88
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/types.go
@@ -0,0 +1,162 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// APIServiceList is a list of APIService objects.
+type APIServiceList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of APIService
+	Items []APIService `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ServiceReference holds a reference to Service.legacy.k8s.io
+type ServiceReference struct {
+	// Namespace is the namespace of the service
+	Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"`
+	// Name is the name of the service
+	Name string `json:"name,omitempty" protobuf:"bytes,2,opt,name=name"`
+	// If specified, the port on the service that is hosting the webhook.
+	// Defaults to 443 for backward compatibility.
+	// `port` should be a valid port number (1-65535, inclusive).
+	// +optional
+	Port *int32 `json:"port,omitempty" protobuf:"varint,3,opt,name=port"`
+}
+
+// APIServiceSpec contains information for locating and communicating with a server.
+// Only https is supported, though you are able to disable certificate verification.
+type APIServiceSpec struct {
+	// Service is a reference to the service for this API server. It must communicate
+	// on port 443.
+	// If the Service is nil, that means the handling for the API groupversion is handled locally on this server.
+	// The call will simply delegate to the normal handler chain to be fulfilled.
+	// +optional
+	Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"`
+	// Group is the API group name this server hosts
+	Group string `json:"group,omitempty" protobuf:"bytes,2,opt,name=group"`
+	// Version is the API version this server hosts. For example, "v1"
+	Version string `json:"version,omitempty" protobuf:"bytes,3,opt,name=version"`
+
+	// InsecureSkipTLSVerify disables TLS certificate verification when communicating with this server.
+	// This is strongly discouraged. You should use the CABundle instead.
+	InsecureSkipTLSVerify bool `json:"insecureSkipTLSVerify,omitempty" protobuf:"varint,4,opt,name=insecureSkipTLSVerify"`
+	// CABundle is a PEM encoded CA bundle which will be used to validate an API server's serving certificate.
+	// If unspecified, system trust roots on the apiserver are used.
+	// +listType=atomic
+	// +optional
+	CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,5,opt,name=caBundle"`
+
+	// GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones.
+	// Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority.
+	// The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10).
+	// The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo)
+	// We'd recommend something like: *.k8s.io (except extensions) at 18000 and
+	// PaaSes (OpenShift, Deis) are recommended to be in the 2000s
+	GroupPriorityMinimum int32 `json:"groupPriorityMinimum" protobuf:"varint,7,opt,name=groupPriorityMinimum"`
+
+	// VersionPriority controls the ordering of this API version inside of its group. Must be greater than zero.
+	// The primary sort is based on VersionPriority, ordered highest to lowest (20 before 10).
+	// Since it's inside of a group, the number can be small, probably in the 10s.
+	// In case of equal version priorities, the version string will be used to compute the order inside a group.
+	// If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered
+	// lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version),
+	// then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first
+	// by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major
+	// version, then minor version. An example sorted list of versions:
+	// v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.
+	VersionPriority int32 `json:"versionPriority" protobuf:"varint,8,opt,name=versionPriority"`
+
+	// leaving this here so everyone remembers why proto index 6 is skipped
+	// Priority int64 `json:"priority" protobuf:"varint,6,opt,name=priority"`
+}
+
+// ConditionStatus indicates the status of a condition (true, false, or unknown).
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionTrue" means a resource is in the condition;
+// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+	ConditionTrue    ConditionStatus = "True"
+	ConditionFalse   ConditionStatus = "False"
+	ConditionUnknown ConditionStatus = "Unknown"
+)
+
+// APIServiceConditionType is a valid value for APIServiceCondition.Type
+type APIServiceConditionType string
+
+const (
+	// Available indicates that the service exists and is reachable
+	Available APIServiceConditionType = "Available"
+)
+
+// APIServiceCondition describes the state of an APIService at a particular point
+type APIServiceCondition struct {
+	// Type is the type of the condition.
+	Type APIServiceConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=APIServiceConditionType"`
+	// Status is the status of the condition.
+	// Can be True, False, Unknown.
+	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
+	// Last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+	// Unique, one-word, CamelCase reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// Human-readable message indicating details about last transition.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// APIServiceStatus contains derived information about an API server
+type APIServiceStatus struct {
+	// Current service state of apiService.
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=type
+	Conditions []APIServiceCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// APIService represents a server for a particular GroupVersion.
+// Name must be "version.group".
+type APIService struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Spec contains information for locating and communicating with a server
+	Spec APIServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
+	// Status contains derived information about an API server
+	Status APIServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.conversion.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.conversion.go
new file mode 100644
index 000000000..208e23efd
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.conversion.go
@@ -0,0 +1,299 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	unsafe "unsafe"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	conversion "k8s.io/apimachinery/pkg/conversion"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	apiregistration "k8s.io/kube-aggregator/pkg/apis/apiregistration"
+)
+
+func init() {
+	localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*APIService)(nil), (*apiregistration.APIService)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_APIService_To_apiregistration_APIService(a.(*APIService), b.(*apiregistration.APIService), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiregistration.APIService)(nil), (*APIService)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiregistration_APIService_To_v1_APIService(a.(*apiregistration.APIService), b.(*APIService), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*APIServiceCondition)(nil), (*apiregistration.APIServiceCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_APIServiceCondition_To_apiregistration_APIServiceCondition(a.(*APIServiceCondition), b.(*apiregistration.APIServiceCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiregistration.APIServiceCondition)(nil), (*APIServiceCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiregistration_APIServiceCondition_To_v1_APIServiceCondition(a.(*apiregistration.APIServiceCondition), b.(*APIServiceCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*APIServiceList)(nil), (*apiregistration.APIServiceList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_APIServiceList_To_apiregistration_APIServiceList(a.(*APIServiceList), b.(*apiregistration.APIServiceList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiregistration.APIServiceList)(nil), (*APIServiceList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiregistration_APIServiceList_To_v1_APIServiceList(a.(*apiregistration.APIServiceList), b.(*APIServiceList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*APIServiceSpec)(nil), (*apiregistration.APIServiceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_APIServiceSpec_To_apiregistration_APIServiceSpec(a.(*APIServiceSpec), b.(*apiregistration.APIServiceSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiregistration.APIServiceSpec)(nil), (*APIServiceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiregistration_APIServiceSpec_To_v1_APIServiceSpec(a.(*apiregistration.APIServiceSpec), b.(*APIServiceSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*APIServiceStatus)(nil), (*apiregistration.APIServiceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_APIServiceStatus_To_apiregistration_APIServiceStatus(a.(*APIServiceStatus), b.(*apiregistration.APIServiceStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiregistration.APIServiceStatus)(nil), (*APIServiceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiregistration_APIServiceStatus_To_v1_APIServiceStatus(a.(*apiregistration.APIServiceStatus), b.(*APIServiceStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ServiceReference)(nil), (*apiregistration.ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + 
return Convert_v1_ServiceReference_To_apiregistration_ServiceReference(a.(*ServiceReference), b.(*apiregistration.ServiceReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiregistration.ServiceReference)(nil), (*ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiregistration_ServiceReference_To_v1_ServiceReference(a.(*apiregistration.ServiceReference), b.(*ServiceReference), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_APIService_To_apiregistration_APIService(in *APIService, out *apiregistration.APIService, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_APIServiceSpec_To_apiregistration_APIServiceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_APIServiceStatus_To_apiregistration_APIServiceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_APIService_To_apiregistration_APIService is an autogenerated conversion function. +func Convert_v1_APIService_To_apiregistration_APIService(in *APIService, out *apiregistration.APIService, s conversion.Scope) error { + return autoConvert_v1_APIService_To_apiregistration_APIService(in, out, s) +} + +func autoConvert_apiregistration_APIService_To_v1_APIService(in *apiregistration.APIService, out *APIService, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_apiregistration_APIServiceSpec_To_v1_APIServiceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_apiregistration_APIServiceStatus_To_v1_APIServiceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_apiregistration_APIService_To_v1_APIService is an autogenerated conversion function. +func Convert_apiregistration_APIService_To_v1_APIService(in *apiregistration.APIService, out *APIService, s conversion.Scope) error { + return autoConvert_apiregistration_APIService_To_v1_APIService(in, out, s) +} + +func autoConvert_v1_APIServiceCondition_To_apiregistration_APIServiceCondition(in *APIServiceCondition, out *apiregistration.APIServiceCondition, s conversion.Scope) error { + out.Type = apiregistration.APIServiceConditionType(in.Type) + out.Status = apiregistration.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_APIServiceCondition_To_apiregistration_APIServiceCondition is an autogenerated conversion function. +func Convert_v1_APIServiceCondition_To_apiregistration_APIServiceCondition(in *APIServiceCondition, out *apiregistration.APIServiceCondition, s conversion.Scope) error { + return autoConvert_v1_APIServiceCondition_To_apiregistration_APIServiceCondition(in, out, s) +} + +func autoConvert_apiregistration_APIServiceCondition_To_v1_APIServiceCondition(in *apiregistration.APIServiceCondition, out *APIServiceCondition, s conversion.Scope) error { + out.Type = APIServiceConditionType(in.Type) + out.Status = ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_apiregistration_APIServiceCondition_To_v1_APIServiceCondition is an autogenerated conversion function. 
+func Convert_apiregistration_APIServiceCondition_To_v1_APIServiceCondition(in *apiregistration.APIServiceCondition, out *APIServiceCondition, s conversion.Scope) error { + return autoConvert_apiregistration_APIServiceCondition_To_v1_APIServiceCondition(in, out, s) +} + +func autoConvert_v1_APIServiceList_To_apiregistration_APIServiceList(in *APIServiceList, out *apiregistration.APIServiceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]apiregistration.APIService, len(*in)) + for i := range *in { + if err := Convert_v1_APIService_To_apiregistration_APIService(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_APIServiceList_To_apiregistration_APIServiceList is an autogenerated conversion function. +func Convert_v1_APIServiceList_To_apiregistration_APIServiceList(in *APIServiceList, out *apiregistration.APIServiceList, s conversion.Scope) error { + return autoConvert_v1_APIServiceList_To_apiregistration_APIServiceList(in, out, s) +} + +func autoConvert_apiregistration_APIServiceList_To_v1_APIServiceList(in *apiregistration.APIServiceList, out *APIServiceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]APIService, len(*in)) + for i := range *in { + if err := Convert_apiregistration_APIService_To_v1_APIService(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_apiregistration_APIServiceList_To_v1_APIServiceList is an autogenerated conversion function. +func Convert_apiregistration_APIServiceList_To_v1_APIServiceList(in *apiregistration.APIServiceList, out *APIServiceList, s conversion.Scope) error { + return autoConvert_apiregistration_APIServiceList_To_v1_APIServiceList(in, out, s) +} + +func autoConvert_v1_APIServiceSpec_To_apiregistration_APIServiceSpec(in *APIServiceSpec, out *apiregistration.APIServiceSpec, s conversion.Scope) error { + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(apiregistration.ServiceReference) + if err := Convert_v1_ServiceReference_To_apiregistration_ServiceReference(*in, *out, s); err != nil { + return err + } + } else { + out.Service = nil + } + out.Group = in.Group + out.Version = in.Version + out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify + out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) + out.GroupPriorityMinimum = in.GroupPriorityMinimum + out.VersionPriority = in.VersionPriority + return nil +} + +// Convert_v1_APIServiceSpec_To_apiregistration_APIServiceSpec is an autogenerated conversion function. 
+func Convert_v1_APIServiceSpec_To_apiregistration_APIServiceSpec(in *APIServiceSpec, out *apiregistration.APIServiceSpec, s conversion.Scope) error { + return autoConvert_v1_APIServiceSpec_To_apiregistration_APIServiceSpec(in, out, s) +} + +func autoConvert_apiregistration_APIServiceSpec_To_v1_APIServiceSpec(in *apiregistration.APIServiceSpec, out *APIServiceSpec, s conversion.Scope) error { + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + if err := Convert_apiregistration_ServiceReference_To_v1_ServiceReference(*in, *out, s); err != nil { + return err + } + } else { + out.Service = nil + } + out.Group = in.Group + out.Version = in.Version + out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify + out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) + out.GroupPriorityMinimum = in.GroupPriorityMinimum + out.VersionPriority = in.VersionPriority + return nil +} + +// Convert_apiregistration_APIServiceSpec_To_v1_APIServiceSpec is an autogenerated conversion function. +func Convert_apiregistration_APIServiceSpec_To_v1_APIServiceSpec(in *apiregistration.APIServiceSpec, out *APIServiceSpec, s conversion.Scope) error { + return autoConvert_apiregistration_APIServiceSpec_To_v1_APIServiceSpec(in, out, s) +} + +func autoConvert_v1_APIServiceStatus_To_apiregistration_APIServiceStatus(in *APIServiceStatus, out *apiregistration.APIServiceStatus, s conversion.Scope) error { + out.Conditions = *(*[]apiregistration.APIServiceCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_v1_APIServiceStatus_To_apiregistration_APIServiceStatus is an autogenerated conversion function. +func Convert_v1_APIServiceStatus_To_apiregistration_APIServiceStatus(in *APIServiceStatus, out *apiregistration.APIServiceStatus, s conversion.Scope) error { + return autoConvert_v1_APIServiceStatus_To_apiregistration_APIServiceStatus(in, out, s) +} + +func autoConvert_apiregistration_APIServiceStatus_To_v1_APIServiceStatus(in *apiregistration.APIServiceStatus, out *APIServiceStatus, s conversion.Scope) error { + out.Conditions = *(*[]APIServiceCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_apiregistration_APIServiceStatus_To_v1_APIServiceStatus is an autogenerated conversion function. +func Convert_apiregistration_APIServiceStatus_To_v1_APIServiceStatus(in *apiregistration.APIServiceStatus, out *APIServiceStatus, s conversion.Scope) error { + return autoConvert_apiregistration_APIServiceStatus_To_v1_APIServiceStatus(in, out, s) +} + +func autoConvert_v1_ServiceReference_To_apiregistration_ServiceReference(in *ServiceReference, out *apiregistration.ServiceReference, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Name = in.Name + if err := metav1.Convert_Pointer_int32_To_int32(&in.Port, &out.Port, s); err != nil { + return err + } + return nil +} + +// Convert_v1_ServiceReference_To_apiregistration_ServiceReference is an autogenerated conversion function. 
+func Convert_v1_ServiceReference_To_apiregistration_ServiceReference(in *ServiceReference, out *apiregistration.ServiceReference, s conversion.Scope) error { + return autoConvert_v1_ServiceReference_To_apiregistration_ServiceReference(in, out, s) +} + +func autoConvert_apiregistration_ServiceReference_To_v1_ServiceReference(in *apiregistration.ServiceReference, out *ServiceReference, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Name = in.Name + if err := metav1.Convert_int32_To_Pointer_int32(&in.Port, &out.Port, s); err != nil { + return err + } + return nil +} + +// Convert_apiregistration_ServiceReference_To_v1_ServiceReference is an autogenerated conversion function. +func Convert_apiregistration_ServiceReference_To_v1_ServiceReference(in *apiregistration.ServiceReference, out *ServiceReference, s conversion.Scope) error { + return autoConvert_apiregistration_ServiceReference_To_v1_ServiceReference(in, out, s) +} diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.deepcopy.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000..638877245 --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.deepcopy.go @@ -0,0 +1,174 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIService) DeepCopyInto(out *APIService) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIService. +func (in *APIService) DeepCopy() *APIService { + if in == nil { + return nil + } + out := new(APIService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIService) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServiceCondition) DeepCopyInto(out *APIServiceCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceCondition. 
+func (in *APIServiceCondition) DeepCopy() *APIServiceCondition { + if in == nil { + return nil + } + out := new(APIServiceCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServiceList) DeepCopyInto(out *APIServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]APIService, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceList. +func (in *APIServiceList) DeepCopy() *APIServiceList { + if in == nil { + return nil + } + out := new(APIServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServiceSpec) DeepCopyInto(out *APIServiceSpec) { + *out = *in + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + (*in).DeepCopyInto(*out) + } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceSpec. +func (in *APIServiceSpec) DeepCopy() *APIServiceSpec { + if in == nil { + return nil + } + out := new(APIServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServiceStatus) DeepCopyInto(out *APIServiceStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]APIServiceCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceStatus. +func (in *APIServiceStatus) DeepCopy() *APIServiceStatus { + if in == nil { + return nil + } + out := new(APIServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. +func (in *ServiceReference) DeepCopy() *ServiceReference { + if in == nil { + return nil + } + out := new(ServiceReference) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.defaults.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.defaults.go new file mode 100644 index 000000000..175637ca5 --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1/zz_generated.defaults.go @@ -0,0 +1,48 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by defaulter-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// RegisterDefaults adds defaulting functions to the given scheme.
+// Public to allow building arbitrary schemes.
+// All generated defaulters are covering - they call all nested defaulters.
+func RegisterDefaults(scheme *runtime.Scheme) error {
+	scheme.AddTypeDefaultingFunc(&APIService{}, func(obj interface{}) { SetObjectDefaults_APIService(obj.(*APIService)) })
+	scheme.AddTypeDefaultingFunc(&APIServiceList{}, func(obj interface{}) { SetObjectDefaults_APIServiceList(obj.(*APIServiceList)) })
+	return nil
+}
+
+func SetObjectDefaults_APIService(in *APIService) {
+	if in.Spec.Service != nil {
+		SetDefaults_ServiceReference(in.Spec.Service)
+	}
+}
+
+func SetObjectDefaults_APIServiceList(in *APIServiceList) {
+	for i := range in.Items {
+		a := &in.Items[i]
+		SetObjectDefaults_APIService(a)
+	}
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/defaults.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/defaults.go
new file mode 100644
index 000000000..a7bfb3d9d
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/defaults.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	utilpointer "k8s.io/utils/pointer"
+)
+
+func addDefaultingFuncs(scheme *runtime.Scheme) error {
+	return RegisterDefaults(scheme)
+}
+
+// SetDefaults_ServiceReference sets defaults for an APIService's ServiceReference
+func SetDefaults_ServiceReference(obj *ServiceReference) {
+	if obj.Port == nil {
+		obj.Port = utilpointer.Int32Ptr(443)
+	}
+}
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/doc.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/doc.go
new file mode 100644
index 000000000..2d5a6e44e
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/doc.go
@@ -0,0 +1,38 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+// +k8s:protobuf-gen=package
+// +k8s:conversion-gen=k8s.io/kube-aggregator/pkg/apis/apiregistration
+// +k8s:openapi-gen=true
+// +groupName=apiregistration.k8s.io
+// +k8s:defaulter-gen=TypeMeta
+// +k8s:prerelease-lifecycle-gen=true
+
+// Package v1beta1 contains the API Registration API, which is responsible for
+// registering an API `Group`/`Version` with another Kubernetes-like API server.
+// The `APIService` holds information about the other API server in
+// `APIServiceSpec` type as well as general `TypeMeta` and `ObjectMeta`. The
+// `APIServiceSpec` type has the main configuration needed to do the
+// aggregation. Any request coming for specified `Group`/`Version` will be
+// directed to the service defined by `ServiceReference` (on port 443) after
+// validating the target using provided `CABundle` or skipping validation
+// if development flag `InsecureSkipTLSVerify` is set. `Priority` controls
+// the order of this API group in the overall discovery document.
+// The return status is a set of conditions for this aggregation. Currently
+// there is only one condition named "Available"; if true, it means the
+// api/server requests will be redirected to the specified API server.
+package v1beta1 // import "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1"
diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.pb.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.pb.go
new file mode 100644
index 000000000..fba302fe0
--- /dev/null
+++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.pb.go
@@ -0,0 +1,1814 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: k8s.io/kubernetes/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto
+
+package v1beta1
+
+import (
+	fmt "fmt"
+
+	io "io"
+
+	proto "github.com/gogo/protobuf/proto"
+
+	math "math"
+	math_bits "math/bits"
+	reflect "reflect"
+	strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *APIService) Reset() { *m = APIService{} } +func (*APIService) ProtoMessage() {} +func (*APIService) Descriptor() ([]byte, []int) { + return fileDescriptor_7515b3cc5b6fa3c3, []int{0} +} +func (m *APIService) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIService) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIService) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIService.Merge(m, src) +} +func (m *APIService) XXX_Size() int { + return m.Size() +} +func (m *APIService) XXX_DiscardUnknown() { + xxx_messageInfo_APIService.DiscardUnknown(m) +} + +var xxx_messageInfo_APIService proto.InternalMessageInfo + +func (m *APIServiceCondition) Reset() { *m = APIServiceCondition{} } +func (*APIServiceCondition) ProtoMessage() {} +func (*APIServiceCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_7515b3cc5b6fa3c3, []int{1} +} +func (m *APIServiceCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIServiceCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIServiceCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIServiceCondition.Merge(m, src) +} +func (m *APIServiceCondition) XXX_Size() int { + return m.Size() +} +func (m *APIServiceCondition) XXX_DiscardUnknown() { + xxx_messageInfo_APIServiceCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_APIServiceCondition proto.InternalMessageInfo + +func (m *APIServiceList) Reset() { *m = APIServiceList{} } +func (*APIServiceList) ProtoMessage() {} +func (*APIServiceList) Descriptor() ([]byte, []int) { + return fileDescriptor_7515b3cc5b6fa3c3, []int{2} +} +func (m *APIServiceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIServiceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIServiceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIServiceList.Merge(m, src) +} +func (m *APIServiceList) XXX_Size() int { + return m.Size() +} +func (m *APIServiceList) XXX_DiscardUnknown() { + xxx_messageInfo_APIServiceList.DiscardUnknown(m) +} + +var xxx_messageInfo_APIServiceList proto.InternalMessageInfo + +func (m *APIServiceSpec) Reset() { *m = APIServiceSpec{} } +func (*APIServiceSpec) ProtoMessage() {} +func (*APIServiceSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_7515b3cc5b6fa3c3, []int{3} +} +func (m *APIServiceSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIServiceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIServiceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIServiceSpec.Merge(m, src) +} +func (m *APIServiceSpec) XXX_Size() int { + return m.Size() +} +func (m *APIServiceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_APIServiceSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_APIServiceSpec proto.InternalMessageInfo + +func (m *APIServiceStatus) Reset() { *m = APIServiceStatus{} } +func (*APIServiceStatus) 
ProtoMessage() {} +func (*APIServiceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_7515b3cc5b6fa3c3, []int{4} +} +func (m *APIServiceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIServiceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIServiceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIServiceStatus.Merge(m, src) +} +func (m *APIServiceStatus) XXX_Size() int { + return m.Size() +} +func (m *APIServiceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_APIServiceStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_APIServiceStatus proto.InternalMessageInfo + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { + return fileDescriptor_7515b3cc5b6fa3c3, []int{5} +} +func (m *ServiceReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceReference.Merge(m, src) +} +func (m *ServiceReference) XXX_Size() int { + return m.Size() +} +func (m *ServiceReference) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceReference proto.InternalMessageInfo + +func init() { + proto.RegisterType((*APIService)(nil), "k8s.io.kube_aggregator.pkg.apis.apiregistration.v1beta1.APIService") + proto.RegisterType((*APIServiceCondition)(nil), "k8s.io.kube_aggregator.pkg.apis.apiregistration.v1beta1.APIServiceCondition") + proto.RegisterType((*APIServiceList)(nil), "k8s.io.kube_aggregator.pkg.apis.apiregistration.v1beta1.APIServiceList") + proto.RegisterType((*APIServiceSpec)(nil), "k8s.io.kube_aggregator.pkg.apis.apiregistration.v1beta1.APIServiceSpec") + proto.RegisterType((*APIServiceStatus)(nil), "k8s.io.kube_aggregator.pkg.apis.apiregistration.v1beta1.APIServiceStatus") + proto.RegisterType((*ServiceReference)(nil), "k8s.io.kube_aggregator.pkg.apis.apiregistration.v1beta1.ServiceReference") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto", fileDescriptor_7515b3cc5b6fa3c3) +} + +var fileDescriptor_7515b3cc5b6fa3c3 = []byte{ + // 845 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0x8e, 0xdb, 0xa4, 0x4d, 0xa7, 0x65, 0x5b, 0x86, 0x5d, 0xad, 0x55, 0x2d, 0x6e, 0x09, 0x12, + 0x14, 0xa4, 0xb5, 0xe9, 0x0a, 0xb1, 0x20, 0x4e, 0x75, 0x0f, 0x55, 0xa5, 0x16, 0xaa, 0x49, 0xd5, + 0x03, 0x02, 0xb1, 0x13, 0xe7, 0xd5, 0x1d, 0xb2, 0xf6, 0x98, 0x99, 0x71, 0xa4, 0xdc, 0x56, 0xe2, + 0x1f, 0xe0, 0xc2, 0xff, 0xd4, 0x03, 0x87, 0x3d, 0xf6, 0x54, 0xd1, 0x20, 0xf1, 0x47, 0xec, 0x09, + 0xcd, 0x78, 0x6c, 0xa7, 0x49, 0x10, 0x55, 0xd5, 0x4b, 0x94, 0xf7, 0xe3, 0xfb, 0xbe, 0x37, 0x6f, + 0xbe, 0x8c, 0x82, 0x5e, 0x0d, 0xbe, 0x96, 0x3e, 0xe3, 0xc1, 0x20, 0xef, 0x81, 0x48, 0x41, 0x81, + 0x0c, 0x86, 0x90, 0xf6, 0xb9, 0x08, 0x26, 0x0a, 0xcf, 0x69, 0x1c, 0x0b, 0x88, 0xa9, 0xe2, 0x22, + 0xc8, 0x06, 0x71, 0x40, 0x33, 0x26, 0xf5, 0x87, 0x80, 0x98, 0x49, 0x25, 0xa8, 0x62, 0x3c, 0x0d, + 0x86, 0xbb, 0x3d, 
0x50, 0x74, 0x37, 0x88, 0x21, 0x05, 0x41, 0x15, 0xf4, 0xfd, 0x4c, 0x70, 0xc5, + 0xf1, 0xcb, 0x82, 0xc8, 0xd7, 0x44, 0x3f, 0xd7, 0x44, 0x7e, 0x36, 0x88, 0x7d, 0x4d, 0xe4, 0x4f, + 0x11, 0xf9, 0x96, 0x68, 0xf3, 0x79, 0xcc, 0xd4, 0x45, 0xde, 0xf3, 0x23, 0x9e, 0x04, 0x31, 0x8f, + 0x79, 0x60, 0xf8, 0x7a, 0xf9, 0xb9, 0x89, 0x4c, 0x60, 0xbe, 0x15, 0x3a, 0x9b, 0x5f, 0xda, 0x81, + 0x69, 0xc6, 0x12, 0x1a, 0x5d, 0xb0, 0x14, 0xc4, 0xa8, 0x9e, 0x36, 0x01, 0x45, 0x83, 0xe1, 0xcc, + 0x74, 0x9b, 0xc1, 0x7f, 0xa1, 0x44, 0x9e, 0x2a, 0x96, 0xc0, 0x0c, 0xe0, 0xab, 0xff, 0x03, 0xc8, + 0xe8, 0x02, 0x12, 0x3a, 0x8d, 0xeb, 0xfc, 0xb9, 0x80, 0xd0, 0xde, 0xc9, 0x61, 0x17, 0xc4, 0x90, + 0x45, 0x80, 0x5f, 0xa1, 0xb6, 0x1e, 0xa9, 0x4f, 0x15, 0x75, 0x9d, 0x6d, 0x67, 0x67, 0xf5, 0xc5, + 0x17, 0xbe, 0x5d, 0xd4, 0x24, 0x73, 0xbd, 0x25, 0xdd, 0xed, 0x0f, 0x77, 0xfd, 0xef, 0x7b, 0xbf, + 0x40, 0xa4, 0x8e, 0x41, 0xd1, 0x10, 0x5f, 0x5e, 0x6f, 0x35, 0xc6, 0xd7, 0x5b, 0xa8, 0xce, 0x91, + 0x8a, 0x15, 0x33, 0xd4, 0x94, 0x19, 0x44, 0xee, 0x82, 0x61, 0x3f, 0xf0, 0xef, 0x79, 0x0d, 0x7e, + 0x3d, 0x74, 0x37, 0x83, 0x28, 0x5c, 0xb3, 0xa2, 0x4d, 0x1d, 0x11, 0x23, 0x81, 0x7f, 0x45, 0x4b, + 0x52, 0x51, 0x95, 0x4b, 0x77, 0xd1, 0x88, 0x1d, 0x3e, 0x84, 0x98, 0x21, 0x0c, 0x1f, 0x59, 0xb9, + 0xa5, 0x22, 0x26, 0x56, 0xa8, 0x73, 0xb5, 0x80, 0x3e, 0xa8, 0x9b, 0xf7, 0x79, 0xda, 0x67, 0x9a, + 0x08, 0x7f, 0x8b, 0x9a, 0x6a, 0x94, 0x81, 0xd9, 0xe9, 0x4a, 0xf8, 0x69, 0x39, 0xec, 0xe9, 0x28, + 0x83, 0x77, 0xd7, 0x5b, 0x4f, 0xe7, 0x40, 0x74, 0x89, 0x18, 0x10, 0xfe, 0xa6, 0x3a, 0xc7, 0x82, + 0x81, 0x7f, 0x74, 0x5b, 0xfc, 0xdd, 0xf5, 0xd6, 0x7a, 0x05, 0xbb, 0x3d, 0x0f, 0x1e, 0x22, 0xfc, + 0x9a, 0x4a, 0x75, 0x2a, 0x68, 0x2a, 0x0b, 0x5a, 0x96, 0x80, 0x5d, 0xc7, 0xe7, 0x77, 0xbb, 0x59, + 0x8d, 0x08, 0x37, 0xad, 0x24, 0x3e, 0x9a, 0x61, 0x23, 0x73, 0x14, 0xf0, 0x27, 0x68, 0x49, 0x00, + 0x95, 0x3c, 0x75, 0x9b, 0x66, 0xe4, 0x6a, 0x5f, 0xc4, 0x64, 0x89, 0xad, 0xe2, 0xcf, 0xd0, 0x72, + 0x02, 0x52, 0xd2, 0x18, 0xdc, 0x96, 0x69, 0x5c, 0xb7, 0x8d, 0xcb, 0xc7, 0x45, 0x9a, 0x94, 0xf5, + 0xce, 0x95, 0x83, 0x1e, 0xd5, 0x7b, 0x3a, 0x62, 0x52, 0xe1, 0x1f, 0x67, 0xdc, 0xea, 0xdf, 0xed, + 0x4c, 0x1a, 0x6d, 0xbc, 0xba, 0x61, 0xe5, 0xda, 0x65, 0x66, 0xc2, 0xa9, 0x17, 0xa8, 0xc5, 0x14, + 0x24, 0x7a, 0xeb, 0x8b, 0x3b, 0xab, 0x2f, 0xf6, 0x1f, 0xc0, 0x3d, 0xe1, 0x7b, 0x56, 0xaf, 0x75, + 0xa8, 0x99, 0x49, 0x21, 0xd0, 0xf9, 0x67, 0x71, 0xf2, 0x68, 0xda, 0xc1, 0x38, 0x43, 0xcb, 0xb2, + 0x08, 0xed, 0xc9, 0xee, 0x6f, 0x5e, 0x4b, 0x4b, 0xe0, 0x1c, 0x04, 0xa4, 0x11, 0x84, 0xab, 0x7a, + 0xbf, 0x65, 0xb6, 0x94, 0xc1, 0x1f, 0xa3, 0x56, 0x2c, 0x78, 0x9e, 0x59, 0x93, 0x55, 0x93, 0x1e, + 0xe8, 0x24, 0x29, 0x6a, 0xfa, 0xbe, 0x86, 0x20, 0x24, 0xe3, 0xa9, 0x31, 0xd1, 0xc4, 0x7d, 0x9d, + 0x15, 0x69, 0x52, 0xd6, 0x71, 0x17, 0x3d, 0x61, 0xa9, 0x84, 0x28, 0x17, 0xd0, 0x1d, 0xb0, 0xec, + 0xf4, 0xa8, 0x7b, 0x06, 0x82, 0x9d, 0x8f, 0x8c, 0x23, 0xda, 0xe1, 0x87, 0x16, 0xf8, 0xe4, 0x70, + 0x5e, 0x13, 0x99, 0x8f, 0xc5, 0x3b, 0xa8, 0x1d, 0xd1, 0x30, 0x4f, 0xfb, 0xaf, 0x0b, 0xc3, 0xac, + 0x85, 0x6b, 0xfa, 0xf6, 0xf6, 0xf7, 0x8a, 0x1c, 0xa9, 0xaa, 0xf8, 0x04, 0x3d, 0x36, 0x23, 0x9f, + 0x08, 0xc6, 0x05, 0x53, 0xa3, 0x63, 0x96, 0xb2, 0x24, 0x4f, 0xdc, 0xe5, 0x6d, 0x67, 0xa7, 0x15, + 0x3e, 0xb3, 0xea, 0x8f, 0x0f, 0xe6, 0xf4, 0x90, 0xb9, 0x48, 0xbc, 0x87, 0xd6, 0xed, 0xd9, 0xca, + 0x8a, 0xdb, 0x36, 0x64, 0x4f, 0x2d, 0xd9, 0xfa, 0xd9, 0xed, 0x32, 0x99, 0xee, 0xef, 0xfc, 0xe1, + 0xa0, 0x8d, 0xe9, 0xb7, 0x04, 0xbf, 0x71, 0x10, 0x8a, 0xca, 0xdf, 0xaf, 0x74, 0x1d, 0xe3, 0xb6, + 0xa3, 0x07, 0x70, 0x5b, 0xf5, 0x28, 0xd4, 
0x4f, 0x72, 0x95, 0x92, 0x64, 0x42, 0xb3, 0xf3, 0x9b, + 0x83, 0x36, 0xa6, 0x6d, 0x82, 0x03, 0xb4, 0x92, 0xd2, 0x04, 0x64, 0x46, 0xa3, 0xf2, 0xe1, 0x7a, + 0xdf, 0xf2, 0xac, 0x7c, 0x57, 0x16, 0x48, 0xdd, 0x83, 0xb7, 0x51, 0x53, 0x07, 0xd6, 0x40, 0xd5, + 0x8b, 0xac, 0x7b, 0x89, 0xa9, 0xe0, 0x67, 0xa8, 0x99, 0x71, 0xa1, 0x8c, 0x77, 0x5a, 0x61, 0x5b, + 0x57, 0x4f, 0xb8, 0x50, 0xc4, 0x64, 0xc3, 0x9f, 0x2e, 0x6f, 0xbc, 0xc6, 0xdb, 0x1b, 0xaf, 0x71, + 0x75, 0xe3, 0x35, 0xde, 0x8c, 0x3d, 0xe7, 0x72, 0xec, 0x39, 0x6f, 0xc7, 0x9e, 0x73, 0x35, 0xf6, + 0x9c, 0xbf, 0xc6, 0x9e, 0xf3, 0xfb, 0xdf, 0x5e, 0xe3, 0x87, 0x97, 0xf7, 0xfc, 0x07, 0xf0, 0x6f, + 0x00, 0x00, 0x00, 0xff, 0xff, 0xa1, 0x90, 0x29, 0x70, 0x54, 0x08, 0x00, 0x00, +} + +func (m *APIService) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIService) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIService) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *APIServiceCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIServiceCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIServiceCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *APIServiceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIServiceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIServiceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := 
len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *APIServiceSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIServiceSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIServiceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.VersionPriority)) + i-- + dAtA[i] = 0x40 + i = encodeVarintGenerated(dAtA, i, uint64(m.GroupPriorityMinimum)) + i-- + dAtA[i] = 0x38 + if m.CABundle != nil { + i -= len(m.CABundle) + copy(dAtA[i:], m.CABundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i-- + dAtA[i] = 0x2a + } + i-- + if m.InsecureSkipTLSVerify { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x1a + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x12 + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *APIServiceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIServiceStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIServiceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ServiceReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x18 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + 
dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *APIService) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *APIServiceCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *APIServiceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIServiceSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.CABundle != nil { + l = len(m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.GroupPriorityMinimum)) + n += 1 + sovGenerated(uint64(m.VersionPriority)) + return n +} + +func (m *APIServiceStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *APIService) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&APIService{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "APIServiceSpec", "APIServiceSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "APIServiceStatus", "APIServiceStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *APIServiceCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&APIServiceCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + 
fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *APIServiceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]APIService{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "APIService", "APIService", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&APIServiceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *APIServiceSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&APIServiceSpec{`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `InsecureSkipTLSVerify:` + fmt.Sprintf("%v", this.InsecureSkipTLSVerify) + `,`, + `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `GroupPriorityMinimum:` + fmt.Sprintf("%v", this.GroupPriorityMinimum) + `,`, + `VersionPriority:` + fmt.Sprintf("%v", this.VersionPriority) + `,`, + `}`, + }, "") + return s +} +func (this *APIServiceStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]APIServiceCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "APIServiceCondition", "APIServiceCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&APIServiceStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *ServiceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceReference{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *APIService) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIService: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIService: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIServiceCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIServiceCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIServiceCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = APIServiceConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIServiceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIServiceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIServiceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, APIService{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIServiceSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIServiceSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIServiceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Service == nil { + m.Service = &ServiceReference{} + } + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InsecureSkipTLSVerify", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.InsecureSkipTLSVerify = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) 
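+	// (Editorial aside, not in the upstream generated file: append into
+	// m.CABundle[:0] copies the CA-bundle bytes out of dAtA instead of
+	// aliasing the input buffer, so the message stays valid after dAtA is
+	// reused; the nil check below then normalizes an absent field to an
+	// empty, non-nil slice so field presence round-trips.)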
+ if m.CABundle == nil { + m.CABundle = []byte{} + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupPriorityMinimum", wireType) + } + m.GroupPriorityMinimum = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GroupPriorityMinimum |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field VersionPriority", wireType) + } + m.VersionPriority = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.VersionPriority |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIServiceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIServiceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIServiceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, APIServiceCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceReference: 
wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, 
ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto new file mode 100644 index 000000000..94d73fb7e --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/generated.proto @@ -0,0 +1,151 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.kube_aggregator.pkg.apis.apiregistration.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1"; + +// APIService represents a server for a particular GroupVersion. +// Name must be "version.group". +message APIService { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec contains information for locating and communicating with a server + optional APIServiceSpec spec = 2; + + // Status contains derived information about an API server + optional APIServiceStatus status = 3; +} + +// APIServiceCondition describes the state of an APIService at a particular point +message APIServiceCondition { + // Type is the type of the condition. + optional string type = 1; + + // Status is the status of the condition. + // Can be True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // Unique, one-word, CamelCase reason for the condition's last transition. + // +optional + optional string reason = 4; + + // Human-readable message indicating details about last transition. + // +optional + optional string message = 5; +} + +// APIServiceList is a list of APIService objects. +message APIServiceList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of APIService + repeated APIService items = 2; +} + +// APIServiceSpec contains information for locating and communicating with a server. 
+// Only https is supported, though you are able to disable certificate verification.
+message APIServiceSpec {
+  // Service is a reference to the service for this API server. It must communicate
+  // on port 443.
+  // If the Service is nil, that means the handling for the API groupversion is handled locally on this server.
+  // The call will simply delegate to the normal handler chain to be fulfilled.
+  // +optional
+  optional ServiceReference service = 1;
+
+  // Group is the API group name this server hosts
+  optional string group = 2;
+
+  // Version is the API version this server hosts. For example, "v1"
+  optional string version = 3;
+
+  // InsecureSkipTLSVerify disables TLS certificate verification when communicating with this server.
+  // This is strongly discouraged. You should use the CABundle instead.
+  optional bool insecureSkipTLSVerify = 4;
+
+  // CABundle is a PEM encoded CA bundle which will be used to validate an API server's serving certificate.
+  // If unspecified, system trust roots on the apiserver are used.
+  // +listType=atomic
+  // +optional
+  optional bytes caBundle = 5;
+
+  // GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones.
+  // Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority.
+  // The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10).
+  // The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo)
+  // We'd recommend something like: *.k8s.io (except extensions) at 18000 and
+  // PaaSes (OpenShift, Deis) are recommended to be in the 2000s
+  optional int32 groupPriorityMinimum = 7;
+
+  // VersionPriority controls the ordering of this API version inside of its group. Must be greater than zero.
+  // The primary sort is based on VersionPriority, ordered highest to lowest (20 before 10).
+  // Since it's inside of a group, the number can be small, probably in the 10s.
+  // In case of equal version priorities, the version string will be used to compute the order inside a group.
+  // If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered
+  // lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version),
+  // then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first
+  // by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major
+  // version, then minor version. An example sorted list of versions:
+  // v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.
+  optional int32 versionPriority = 8;
+}
+
+// APIServiceStatus contains derived information about an API server
+message APIServiceStatus {
+  // Current service state of apiService.
+  // +optional
+  // +patchMergeKey=type
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=type
+  repeated APIServiceCondition conditions = 1;
+}
+
+// ServiceReference holds a reference to Service.legacy.k8s.io
+message ServiceReference {
+  // Namespace is the namespace of the service
+  optional string namespace = 1;
+
+  // Name is the name of the service
+  optional string name = 2;
+
+  // If specified, the port on the service that is hosting the webhook.
+  // Defaults to 443 for backward compatibility.
+ // `port` should be a valid port number (1-65535, inclusive). + // +optional + optional int32 port = 3; +} + diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/register.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/register.go new file mode 100644 index 000000000..baa179571 --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/register.go @@ -0,0 +1,61 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the API group for apiregistration +const GroupName = "apiregistration.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes, addDefaultingFuncs) +} + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &APIService{}, + &APIServiceList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/types.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/types.go new file mode 100644 index 000000000..11cb3fb65 --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/types.go @@ -0,0 +1,168 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:prerelease-lifecycle-gen:introduced=1.7
+// +k8s:prerelease-lifecycle-gen:deprecated=1.19
+// +k8s:prerelease-lifecycle-gen:replacement=apiregistration.k8s.io,v1,APIServiceList
+
+// APIServiceList is a list of APIService objects.
+type APIServiceList struct {
+	metav1.TypeMeta `json:",inline"`
+	// Standard list metadata
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// +optional
+	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Items is the list of APIService
+	Items []APIService `json:"items" protobuf:"bytes,2,rep,name=items"`
+}
+
+// ServiceReference holds a reference to Service.legacy.k8s.io
+type ServiceReference struct {
+	// Namespace is the namespace of the service
+	Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"`
+	// Name is the name of the service
+	Name string `json:"name,omitempty" protobuf:"bytes,2,opt,name=name"`
+	// If specified, the port on the service that is hosting the webhook.
+	// Defaults to 443 for backward compatibility.
+	// `port` should be a valid port number (1-65535, inclusive).
+	// +optional
+	Port *int32 `json:"port,omitempty" protobuf:"varint,3,opt,name=port"`
+}
+
+// APIServiceSpec contains information for locating and communicating with a server.
+// Only https is supported, though you are able to disable certificate verification.
+type APIServiceSpec struct {
+	// Service is a reference to the service for this API server. It must communicate
+	// on port 443.
+	// If the Service is nil, that means the handling for the API groupversion is handled locally on this server.
+	// The call will simply delegate to the normal handler chain to be fulfilled.
+	// +optional
+	Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"`
+	// Group is the API group name this server hosts
+	Group string `json:"group,omitempty" protobuf:"bytes,2,opt,name=group"`
+	// Version is the API version this server hosts. For example, "v1"
+	Version string `json:"version,omitempty" protobuf:"bytes,3,opt,name=version"`
+
+	// InsecureSkipTLSVerify disables TLS certificate verification when communicating with this server.
+	// This is strongly discouraged. You should use the CABundle instead.
+	InsecureSkipTLSVerify bool `json:"insecureSkipTLSVerify,omitempty" protobuf:"varint,4,opt,name=insecureSkipTLSVerify"`
+	// CABundle is a PEM encoded CA bundle which will be used to validate an API server's serving certificate.
+	// If unspecified, system trust roots on the apiserver are used.
+	// +listType=atomic
+	// +optional
+	CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,5,opt,name=caBundle"`
+
+	// GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones.
+	// Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority.
+	// The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10).
+	// The secondary sort is based on the alphabetical comparison of the name of the object. (v1.bar before v1.foo)
+	// We'd recommend something like: *.k8s.io (except extensions) at 18000 and
+	// PaaSes (OpenShift, Deis) are recommended to be in the 2000s
+	GroupPriorityMinimum int32 `json:"groupPriorityMinimum" protobuf:"varint,7,opt,name=groupPriorityMinimum"`
+
+	// VersionPriority controls the ordering of this API version inside of its group. Must be greater than zero.
+	// The primary sort is based on VersionPriority, ordered highest to lowest (20 before 10).
+	// Since it's inside of a group, the number can be small, probably in the 10s.
+	// In case of equal version priorities, the version string will be used to compute the order inside a group.
+	// If the version string is "kube-like", it will sort above non "kube-like" version strings, which are ordered
+	// lexicographically. "Kube-like" versions start with a "v", then are followed by a number (the major version),
+	// then optionally the string "alpha" or "beta" and another number (the minor version). These are sorted first
+	// by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major
+	// version, then minor version. An example sorted list of versions:
+	// v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.
+	VersionPriority int32 `json:"versionPriority" protobuf:"varint,8,opt,name=versionPriority"`
+
+	// leaving this here so everyone remembers why proto index 6 is skipped
+	// Priority int64 `json:"priority" protobuf:"varint,6,opt,name=priority"`
+}
+
+// ConditionStatus indicates the status of a condition (true, false, or unknown).
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionTrue" means a resource is in the condition;
+// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+	ConditionTrue    ConditionStatus = "True"
+	ConditionFalse   ConditionStatus = "False"
+	ConditionUnknown ConditionStatus = "Unknown"
+)
+
+// APIServiceConditionType is a valid value for APIServiceCondition.Type
+type APIServiceConditionType string
+
+const (
+	// Available indicates that the service exists and is reachable
+	Available APIServiceConditionType = "Available"
+)
+
+// APIServiceCondition describes the state of an APIService at a particular point
+type APIServiceCondition struct {
+	// Type is the type of the condition.
+	Type APIServiceConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=APIServiceConditionType"`
+	// Status is the status of the condition.
+	// Can be True, False, Unknown.
+	Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
+	// Last time the condition transitioned from one status to another.
+	// +optional
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"`
+	// Unique, one-word, CamelCase reason for the condition's last transition.
+	// +optional
+	Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"`
+	// Human-readable message indicating details about last transition.
+	// +optional
+	Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"`
+}
+
+// APIServiceStatus contains derived information about an API server
+type APIServiceStatus struct {
+	// Current service state of apiService.
+ // +optional + // +patchMergeKey=type + // +patchStrategy=merge + // +listType=map + // +listMapKey=type + Conditions []APIServiceCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.7 +// +k8s:prerelease-lifecycle-gen:deprecated=1.19 +// +k8s:prerelease-lifecycle-gen:replacement=apiregistration.k8s.io,v1,APIService + +// APIService represents a server for a particular GroupVersion. +// Name must be "version.group". +type APIService struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec contains information for locating and communicating with a server + Spec APIServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // Status contains derived information about an API server + Status APIServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.conversion.go new file mode 100644 index 000000000..665b959f7 --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.conversion.go @@ -0,0 +1,299 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1beta1 + +import ( + unsafe "unsafe" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + apiregistration "k8s.io/kube-aggregator/pkg/apis/apiregistration" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
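+// (Editorial aside, not in the upstream file: the init() above already wires this
+// up via localSchemeBuilder.Register(RegisterConversions), so consumers of this
+// package only need to call the exported AddToScheme from register.go.)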
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*APIService)(nil), (*apiregistration.APIService)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_APIService_To_apiregistration_APIService(a.(*APIService), b.(*apiregistration.APIService), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiregistration.APIService)(nil), (*APIService)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiregistration_APIService_To_v1beta1_APIService(a.(*apiregistration.APIService), b.(*APIService), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*APIServiceCondition)(nil), (*apiregistration.APIServiceCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_APIServiceCondition_To_apiregistration_APIServiceCondition(a.(*APIServiceCondition), b.(*apiregistration.APIServiceCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiregistration.APIServiceCondition)(nil), (*APIServiceCondition)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiregistration_APIServiceCondition_To_v1beta1_APIServiceCondition(a.(*apiregistration.APIServiceCondition), b.(*APIServiceCondition), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*APIServiceList)(nil), (*apiregistration.APIServiceList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_APIServiceList_To_apiregistration_APIServiceList(a.(*APIServiceList), b.(*apiregistration.APIServiceList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiregistration.APIServiceList)(nil), (*APIServiceList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiregistration_APIServiceList_To_v1beta1_APIServiceList(a.(*apiregistration.APIServiceList), b.(*APIServiceList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*APIServiceSpec)(nil), (*apiregistration.APIServiceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_APIServiceSpec_To_apiregistration_APIServiceSpec(a.(*APIServiceSpec), b.(*apiregistration.APIServiceSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiregistration.APIServiceSpec)(nil), (*APIServiceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiregistration_APIServiceSpec_To_v1beta1_APIServiceSpec(a.(*apiregistration.APIServiceSpec), b.(*APIServiceSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*APIServiceStatus)(nil), (*apiregistration.APIServiceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_APIServiceStatus_To_apiregistration_APIServiceStatus(a.(*APIServiceStatus), b.(*apiregistration.APIServiceStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiregistration.APIServiceStatus)(nil), (*APIServiceStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiregistration_APIServiceStatus_To_v1beta1_APIServiceStatus(a.(*apiregistration.APIServiceStatus), b.(*APIServiceStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ServiceReference)(nil), (*apiregistration.ServiceReference)(nil), 
func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ServiceReference_To_apiregistration_ServiceReference(a.(*ServiceReference), b.(*apiregistration.ServiceReference), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*apiregistration.ServiceReference)(nil), (*ServiceReference)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_apiregistration_ServiceReference_To_v1beta1_ServiceReference(a.(*apiregistration.ServiceReference), b.(*ServiceReference), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1beta1_APIService_To_apiregistration_APIService(in *APIService, out *apiregistration.APIService, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_APIServiceSpec_To_apiregistration_APIServiceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_APIServiceStatus_To_apiregistration_APIServiceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_APIService_To_apiregistration_APIService is an autogenerated conversion function. +func Convert_v1beta1_APIService_To_apiregistration_APIService(in *APIService, out *apiregistration.APIService, s conversion.Scope) error { + return autoConvert_v1beta1_APIService_To_apiregistration_APIService(in, out, s) +} + +func autoConvert_apiregistration_APIService_To_v1beta1_APIService(in *apiregistration.APIService, out *APIService, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_apiregistration_APIServiceSpec_To_v1beta1_APIServiceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_apiregistration_APIServiceStatus_To_v1beta1_APIServiceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_apiregistration_APIService_To_v1beta1_APIService is an autogenerated conversion function. +func Convert_apiregistration_APIService_To_v1beta1_APIService(in *apiregistration.APIService, out *APIService, s conversion.Scope) error { + return autoConvert_apiregistration_APIService_To_v1beta1_APIService(in, out, s) +} + +func autoConvert_v1beta1_APIServiceCondition_To_apiregistration_APIServiceCondition(in *APIServiceCondition, out *apiregistration.APIServiceCondition, s conversion.Scope) error { + out.Type = apiregistration.APIServiceConditionType(in.Type) + out.Status = apiregistration.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1beta1_APIServiceCondition_To_apiregistration_APIServiceCondition is an autogenerated conversion function. +func Convert_v1beta1_APIServiceCondition_To_apiregistration_APIServiceCondition(in *APIServiceCondition, out *apiregistration.APIServiceCondition, s conversion.Scope) error { + return autoConvert_v1beta1_APIServiceCondition_To_apiregistration_APIServiceCondition(in, out, s) +} + +func autoConvert_apiregistration_APIServiceCondition_To_v1beta1_APIServiceCondition(in *apiregistration.APIServiceCondition, out *APIServiceCondition, s conversion.Scope) error { + out.Type = APIServiceConditionType(in.Type) + out.Status = ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_apiregistration_APIServiceCondition_To_v1beta1_APIServiceCondition is an autogenerated conversion function. 
+func Convert_apiregistration_APIServiceCondition_To_v1beta1_APIServiceCondition(in *apiregistration.APIServiceCondition, out *APIServiceCondition, s conversion.Scope) error { + return autoConvert_apiregistration_APIServiceCondition_To_v1beta1_APIServiceCondition(in, out, s) +} + +func autoConvert_v1beta1_APIServiceList_To_apiregistration_APIServiceList(in *APIServiceList, out *apiregistration.APIServiceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]apiregistration.APIService, len(*in)) + for i := range *in { + if err := Convert_v1beta1_APIService_To_apiregistration_APIService(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1beta1_APIServiceList_To_apiregistration_APIServiceList is an autogenerated conversion function. +func Convert_v1beta1_APIServiceList_To_apiregistration_APIServiceList(in *APIServiceList, out *apiregistration.APIServiceList, s conversion.Scope) error { + return autoConvert_v1beta1_APIServiceList_To_apiregistration_APIServiceList(in, out, s) +} + +func autoConvert_apiregistration_APIServiceList_To_v1beta1_APIServiceList(in *apiregistration.APIServiceList, out *APIServiceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]APIService, len(*in)) + for i := range *in { + if err := Convert_apiregistration_APIService_To_v1beta1_APIService(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_apiregistration_APIServiceList_To_v1beta1_APIServiceList is an autogenerated conversion function. +func Convert_apiregistration_APIServiceList_To_v1beta1_APIServiceList(in *apiregistration.APIServiceList, out *APIServiceList, s conversion.Scope) error { + return autoConvert_apiregistration_APIServiceList_To_v1beta1_APIServiceList(in, out, s) +} + +func autoConvert_v1beta1_APIServiceSpec_To_apiregistration_APIServiceSpec(in *APIServiceSpec, out *apiregistration.APIServiceSpec, s conversion.Scope) error { + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(apiregistration.ServiceReference) + if err := Convert_v1beta1_ServiceReference_To_apiregistration_ServiceReference(*in, *out, s); err != nil { + return err + } + } else { + out.Service = nil + } + out.Group = in.Group + out.Version = in.Version + out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify + out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) + out.GroupPriorityMinimum = in.GroupPriorityMinimum + out.VersionPriority = in.VersionPriority + return nil +} + +// Convert_v1beta1_APIServiceSpec_To_apiregistration_APIServiceSpec is an autogenerated conversion function. 
+func Convert_v1beta1_APIServiceSpec_To_apiregistration_APIServiceSpec(in *APIServiceSpec, out *apiregistration.APIServiceSpec, s conversion.Scope) error { + return autoConvert_v1beta1_APIServiceSpec_To_apiregistration_APIServiceSpec(in, out, s) +} + +func autoConvert_apiregistration_APIServiceSpec_To_v1beta1_APIServiceSpec(in *apiregistration.APIServiceSpec, out *APIServiceSpec, s conversion.Scope) error { + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + if err := Convert_apiregistration_ServiceReference_To_v1beta1_ServiceReference(*in, *out, s); err != nil { + return err + } + } else { + out.Service = nil + } + out.Group = in.Group + out.Version = in.Version + out.InsecureSkipTLSVerify = in.InsecureSkipTLSVerify + out.CABundle = *(*[]byte)(unsafe.Pointer(&in.CABundle)) + out.GroupPriorityMinimum = in.GroupPriorityMinimum + out.VersionPriority = in.VersionPriority + return nil +} + +// Convert_apiregistration_APIServiceSpec_To_v1beta1_APIServiceSpec is an autogenerated conversion function. +func Convert_apiregistration_APIServiceSpec_To_v1beta1_APIServiceSpec(in *apiregistration.APIServiceSpec, out *APIServiceSpec, s conversion.Scope) error { + return autoConvert_apiregistration_APIServiceSpec_To_v1beta1_APIServiceSpec(in, out, s) +} + +func autoConvert_v1beta1_APIServiceStatus_To_apiregistration_APIServiceStatus(in *APIServiceStatus, out *apiregistration.APIServiceStatus, s conversion.Scope) error { + out.Conditions = *(*[]apiregistration.APIServiceCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_v1beta1_APIServiceStatus_To_apiregistration_APIServiceStatus is an autogenerated conversion function. +func Convert_v1beta1_APIServiceStatus_To_apiregistration_APIServiceStatus(in *APIServiceStatus, out *apiregistration.APIServiceStatus, s conversion.Scope) error { + return autoConvert_v1beta1_APIServiceStatus_To_apiregistration_APIServiceStatus(in, out, s) +} + +func autoConvert_apiregistration_APIServiceStatus_To_v1beta1_APIServiceStatus(in *apiregistration.APIServiceStatus, out *APIServiceStatus, s conversion.Scope) error { + out.Conditions = *(*[]APIServiceCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_apiregistration_APIServiceStatus_To_v1beta1_APIServiceStatus is an autogenerated conversion function. +func Convert_apiregistration_APIServiceStatus_To_v1beta1_APIServiceStatus(in *apiregistration.APIServiceStatus, out *APIServiceStatus, s conversion.Scope) error { + return autoConvert_apiregistration_APIServiceStatus_To_v1beta1_APIServiceStatus(in, out, s) +} + +func autoConvert_v1beta1_ServiceReference_To_apiregistration_ServiceReference(in *ServiceReference, out *apiregistration.ServiceReference, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Name = in.Name + if err := v1.Convert_Pointer_int32_To_int32(&in.Port, &out.Port, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_ServiceReference_To_apiregistration_ServiceReference is an autogenerated conversion function. 
+func Convert_v1beta1_ServiceReference_To_apiregistration_ServiceReference(in *ServiceReference, out *apiregistration.ServiceReference, s conversion.Scope) error { + return autoConvert_v1beta1_ServiceReference_To_apiregistration_ServiceReference(in, out, s) +} + +func autoConvert_apiregistration_ServiceReference_To_v1beta1_ServiceReference(in *apiregistration.ServiceReference, out *ServiceReference, s conversion.Scope) error { + out.Namespace = in.Namespace + out.Name = in.Name + if err := v1.Convert_int32_To_Pointer_int32(&in.Port, &out.Port, s); err != nil { + return err + } + return nil +} + +// Convert_apiregistration_ServiceReference_To_v1beta1_ServiceReference is an autogenerated conversion function. +func Convert_apiregistration_ServiceReference_To_v1beta1_ServiceReference(in *apiregistration.ServiceReference, out *ServiceReference, s conversion.Scope) error { + return autoConvert_apiregistration_ServiceReference_To_v1beta1_ServiceReference(in, out, s) +} diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..989688e9f --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,174 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIService) DeepCopyInto(out *APIService) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIService. +func (in *APIService) DeepCopy() *APIService { + if in == nil { + return nil + } + out := new(APIService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIService) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServiceCondition) DeepCopyInto(out *APIServiceCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceCondition. 
+func (in *APIServiceCondition) DeepCopy() *APIServiceCondition { + if in == nil { + return nil + } + out := new(APIServiceCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServiceList) DeepCopyInto(out *APIServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]APIService, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceList. +func (in *APIServiceList) DeepCopy() *APIServiceList { + if in == nil { + return nil + } + out := new(APIServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServiceSpec) DeepCopyInto(out *APIServiceSpec) { + *out = *in + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + (*in).DeepCopyInto(*out) + } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceSpec. +func (in *APIServiceSpec) DeepCopy() *APIServiceSpec { + if in == nil { + return nil + } + out := new(APIServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServiceStatus) DeepCopyInto(out *APIServiceStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]APIServiceCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceStatus. +func (in *APIServiceStatus) DeepCopy() *APIServiceStatus { + if in == nil { + return nil + } + out := new(APIServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { + *out = *in + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. 
+func (in *ServiceReference) DeepCopy() *ServiceReference { + if in == nil { + return nil + } + out := new(ServiceReference) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.defaults.go new file mode 100644 index 000000000..034247c30 --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.defaults.go @@ -0,0 +1,48 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + scheme.AddTypeDefaultingFunc(&APIService{}, func(obj interface{}) { SetObjectDefaults_APIService(obj.(*APIService)) }) + scheme.AddTypeDefaultingFunc(&APIServiceList{}, func(obj interface{}) { SetObjectDefaults_APIServiceList(obj.(*APIServiceList)) }) + return nil +} + +func SetObjectDefaults_APIService(in *APIService) { + if in.Spec.Service != nil { + SetDefaults_ServiceReference(in.Spec.Service) + } +} + +func SetObjectDefaults_APIServiceList(in *APIServiceList) { + for i := range in.Items { + a := &in.Items[i] + SetObjectDefaults_APIService(a) + } +} diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.prerelease-lifecycle.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 000000000..e29944718 --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,74 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1beta1 + +import ( + schema "k8s.io/apimachinery/pkg/runtime/schema" +) + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. 
+// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *APIService) APILifecycleIntroduced() (major, minor int) { + return 1, 7 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *APIService) APILifecycleDeprecated() (major, minor int) { + return 1, 19 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *APIService) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "apiregistration.k8s.io", Version: "v1", Kind: "APIService"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *APIService) APILifecycleRemoved() (major, minor int) { + return 1, 22 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *APIServiceList) APILifecycleIntroduced() (major, minor int) { + return 1, 7 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *APIServiceList) APILifecycleDeprecated() (major, minor int) { + return 1, 19 +} + +// APILifecycleReplacement is an autogenerated function, returning the group, version, and kind that should be used instead of this deprecated type. +// It is controlled by "k8s:prerelease-lifecycle-gen:replacement=,," tags in types.go. +func (in *APIServiceList) APILifecycleReplacement() schema.GroupVersionKind { + return schema.GroupVersionKind{Group: "apiregistration.k8s.io", Version: "v1", Kind: "APIServiceList"} +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *APIServiceList) APILifecycleRemoved() (major, minor int) { + return 1, 22 +} diff --git a/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/zz_generated.deepcopy.go b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/zz_generated.deepcopy.go new file mode 100644 index 000000000..45d0347c0 --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/apis/apiregistration/zz_generated.deepcopy.go @@ -0,0 +1,221 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package apiregistration + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIService) DeepCopyInto(out *APIService) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIService. +func (in *APIService) DeepCopy() *APIService { + if in == nil { + return nil + } + out := new(APIService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIService) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServiceCondition) DeepCopyInto(out *APIServiceCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceCondition. +func (in *APIServiceCondition) DeepCopy() *APIServiceCondition { + if in == nil { + return nil + } + out := new(APIServiceCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServiceList) DeepCopyInto(out *APIServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]APIService, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceList. +func (in *APIServiceList) DeepCopy() *APIServiceList { + if in == nil { + return nil + } + out := new(APIServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *APIServiceSpec) DeepCopyInto(out *APIServiceSpec) { + *out = *in + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + **out = **in + } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceSpec. +func (in *APIServiceSpec) DeepCopy() *APIServiceSpec { + if in == nil { + return nil + } + out := new(APIServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIServiceStatus) DeepCopyInto(out *APIServiceStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]APIServiceCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIServiceStatus. +func (in *APIServiceStatus) DeepCopy() *APIServiceStatus { + if in == nil { + return nil + } + out := new(APIServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ByGroupPriorityMinimum) DeepCopyInto(out *ByGroupPriorityMinimum) { + { + in := &in + *out = make(ByGroupPriorityMinimum, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(APIService) + (*in).DeepCopyInto(*out) + } + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ByGroupPriorityMinimum. +func (in ByGroupPriorityMinimum) DeepCopy() ByGroupPriorityMinimum { + if in == nil { + return nil + } + out := new(ByGroupPriorityMinimum) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ByVersionPriority) DeepCopyInto(out *ByVersionPriority) { + { + in := &in + *out = make(ByVersionPriority, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(APIService) + (*in).DeepCopyInto(*out) + } + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ByVersionPriority. +func (in ByVersionPriority) DeepCopy() ByVersionPriority { + if in == nil { + return nil + } + out := new(ByVersionPriority) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. +func (in *ServiceReference) DeepCopy() *ServiceReference { + if in == nil { + return nil + } + out := new(ServiceReference) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/doc.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/doc.go new file mode 100644 index 000000000..7dc375616 --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/register.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/register.go new file mode 100644 index 000000000..cb2193911 --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme/register.go @@ -0,0 +1,58 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + apiregistrationv1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" + apiregistrationv1beta1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + apiregistrationv1beta1.AddToScheme, + apiregistrationv1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. 
+var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiregistration_client.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiregistration_client.go new file mode 100644 index 000000000..f6dc74aa9 --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiregistration_client.go @@ -0,0 +1,107 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "net/http" + + rest "k8s.io/client-go/rest" + v1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" + "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +) + +type ApiregistrationV1Interface interface { + RESTClient() rest.Interface + APIServicesGetter +} + +// ApiregistrationV1Client is used to interact with features provided by the apiregistration.k8s.io group. +type ApiregistrationV1Client struct { + restClient rest.Interface +} + +func (c *ApiregistrationV1Client) APIServices() APIServiceInterface { + return newAPIServices(c) +} + +// NewForConfig creates a new ApiregistrationV1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*ApiregistrationV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new ApiregistrationV1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*ApiregistrationV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &ApiregistrationV1Client{client}, nil +} + +// NewForConfigOrDie creates a new ApiregistrationV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ApiregistrationV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ApiregistrationV1Client for the given RESTClient. 
+func New(c rest.Interface) *ApiregistrationV1Client { + return &ApiregistrationV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *ApiregistrationV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiservice.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiservice.go new file mode 100644 index 000000000..25bf6ea44 --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/apiservice.go @@ -0,0 +1,184 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "context" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1" + scheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +) + +// APIServicesGetter has a method to return a APIServiceInterface. +// A group's client should implement this interface. +type APIServicesGetter interface { + APIServices() APIServiceInterface +} + +// APIServiceInterface has methods to work with APIService resources. 
+type APIServiceInterface interface { + Create(ctx context.Context, aPIService *v1.APIService, opts metav1.CreateOptions) (*v1.APIService, error) + Update(ctx context.Context, aPIService *v1.APIService, opts metav1.UpdateOptions) (*v1.APIService, error) + UpdateStatus(ctx context.Context, aPIService *v1.APIService, opts metav1.UpdateOptions) (*v1.APIService, error) + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.APIService, error) + List(ctx context.Context, opts metav1.ListOptions) (*v1.APIServiceList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.APIService, err error) + APIServiceExpansion +} + +// aPIServices implements APIServiceInterface +type aPIServices struct { + client rest.Interface +} + +// newAPIServices returns a APIServices +func newAPIServices(c *ApiregistrationV1Client) *aPIServices { + return &aPIServices{ + client: c.RESTClient(), + } +} + +// Get takes name of the aPIService, and returns the corresponding aPIService object, and an error if there is any. +func (c *aPIServices) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.APIService, err error) { + result = &v1.APIService{} + err = c.client.Get(). + Resource("apiservices"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of APIServices that match those selectors. +func (c *aPIServices) List(ctx context.Context, opts metav1.ListOptions) (result *v1.APIServiceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.APIServiceList{} + err = c.client.Get(). + Resource("apiservices"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested aPIServices. +func (c *aPIServices) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("apiservices"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a aPIService and creates it. Returns the server's representation of the aPIService, and an error, if there is any. +func (c *aPIServices) Create(ctx context.Context, aPIService *v1.APIService, opts metav1.CreateOptions) (result *v1.APIService, err error) { + result = &v1.APIService{} + err = c.client.Post(). + Resource("apiservices"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(aPIService). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a aPIService and updates it. Returns the server's representation of the aPIService, and an error, if there is any. +func (c *aPIServices) Update(ctx context.Context, aPIService *v1.APIService, opts metav1.UpdateOptions) (result *v1.APIService, err error) { + result = &v1.APIService{} + err = c.client.Put(). + Resource("apiservices"). 
+ Name(aPIService.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(aPIService). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *aPIServices) UpdateStatus(ctx context.Context, aPIService *v1.APIService, opts metav1.UpdateOptions) (result *v1.APIService, err error) { + result = &v1.APIService{} + err = c.client.Put(). + Resource("apiservices"). + Name(aPIService.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(aPIService). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the aPIService and deletes it. Returns an error if one occurs. +func (c *aPIServices) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("apiservices"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *aPIServices) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("apiservices"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched aPIService. +func (c *aPIServices) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.APIService, err error) { + result = &v1.APIService{} + err = c.client.Patch(pt). + Resource("apiservices"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/doc.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/doc.go new file mode 100644 index 000000000..3af5d054f --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/generated_expansion.go b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/generated_expansion.go new file mode 100644 index 000000000..87aa18716 --- /dev/null +++ b/vendor/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type APIServiceExpansion interface{} diff --git a/vendor/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/api.pb.go b/vendor/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/api.pb.go index 4f183239c..a24d42881 100644 --- a/vendor/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/api.pb.go +++ b/vendor/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/api.pb.go @@ -1101,71 +1101,73 @@ func init() { func init() { proto.RegisterFile("api.proto", fileDescriptor_00212fb1f9d3bf1c) } var fileDescriptor_00212fb1f9d3bf1c = []byte{ - // 1014 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0x5f, 0x6f, 0x1b, 0x45, - 0x10, 0xcf, 0xc5, 0x4d, 0x62, 0x8f, 0x5d, 0x27, 0xdd, 0x84, 0xc8, 0xb9, 0x04, 0x37, 0xdd, 0x14, - 0x1a, 0xa4, 0xc6, 0x21, 0x2e, 0x6a, 0x11, 0x0f, 0x08, 0x17, 0x07, 0xb0, 0x42, 0x53, 0xeb, 0x42, - 0xc5, 0x03, 0x88, 0xd3, 0xf9, 0x6e, 0x63, 0x9f, 0x38, 0xef, 0x1e, 0xb7, 0x6b, 0x4b, 0xae, 0x84, - 0xc4, 0x03, 0x1f, 0xa0, 0xdf, 0x01, 0xbe, 0x02, 0xdf, 0xa1, 0x8f, 0x3c, 0xf2, 0x84, 0xa8, 0xf9, - 0x22, 0xe8, 0x76, 0xef, 0x9f, 0x2e, 0x17, 0x2b, 0x95, 0x78, 0xf3, 0xce, 0xcc, 0x6f, 0xfe, 0xfc, - 0x66, 0x3c, 0x73, 0x50, 0xb1, 0x7c, 0xb7, 0xe5, 0x07, 0x4c, 0x30, 0xb4, 0x36, 0x3d, 0x19, 0x10, - 0x61, 0x9d, 0xe8, 0x47, 0x43, 0x57, 0x8c, 0x26, 0x83, 0x96, 0xcd, 0xc6, 0xc7, 0x43, 0x36, 0x64, - 0xc7, 0x52, 0x3f, 0x98, 0x5c, 0xca, 0x97, 0x7c, 0xc8, 0x5f, 0x0a, 0x87, 0x5f, 0x69, 0xb0, 0xd9, - 0x25, 0x53, 0xd7, 0x26, 0x7d, 0x6f, 0x32, 0x74, 0xe9, 0x73, 0x5f, 0xb8, 0x8c, 0x72, 0xf4, 0x10, - 0x90, 0x1f, 0x10, 0x93, 0x0b, 0x2b, 0x10, 0x66, 0x40, 0x7e, 0x9a, 0xb8, 0x01, 0x71, 0x1a, 0xda, - 0xbe, 0x76, 0x58, 0x36, 0x36, 0xfc, 0x80, 0x5c, 0x84, 0x0a, 0x23, 0x92, 0xa3, 0x33, 0xc0, 0x43, - 0x22, 0x4c, 0x3f, 0x20, 0x97, 0x24, 0x08, 0x88, 0x63, 0x5a, 0x9e, 0xc7, 0x6c, 0x2b, 0x74, 0x65, - 0x5a, 0x53, 0xcb, 0xf5, 0xac, 0x81, 0x47, 0x1a, 0xcb, 0x12, 0x7d, 0x77, 0x48, 0x44, 0x3f, 0x36, - 0xec, 0x24, 0x76, 0x9d, 0xd8, 0x0c, 0xff, 0xae, 0xc1, 0xba, 0x41, 0x86, 0x2e, 0x17, 0x24, 0x08, - 0x23, 0x10, 0x2e, 0x50, 0x03, 0xd6, 0xa6, 0x24, 0xe0, 0x2e, 0xa3, 0x32, 0x87, 0x8a, 0x11, 0x3f, - 0x91, 0x0e, 0x65, 0x42, 0x1d, 0x9f, 0xb9, 0x54, 0xc8, 0x00, 0x15, 0x23, 0x79, 0xa3, 0x03, 0xb8, - 0x1d, 0x10, 0xce, 0x26, 0x81, 0x4d, 0x4c, 0x6a, 0x8d, 0x49, 0xa3, 0x24, 0x0d, 0x6a, 0xb1, 0xf0, - 0xdc, 0x1a, 0x13, 0xf4, 0x18, 0xd6, 0x98, 0x2a, 0xba, 0x71, 0x6b, 0x5f, 0x3b, 0xac, 0xb6, 0xf7, - 0x5a, 0x11, 0x97, 0xad, 0x02, 0x62, 0x8c, 0xd8, 0x18, 0xaf, 0xc1, 0xca, 0xe9, 0xd8, 0x17, 0x33, - 0xdc, 0x81, 0xad, 0xaf, 0x5d, 0x2e, 0x3a, 0xd4, 0xf9, 0xd6, 0x12, 0xf6, 0xc8, 0x20, 0xdc, 0x67, - 0x94, 0x13, 0xf4, 0x01, 0xac, 0x39, 0xd2, 0x01, 0x6f, 0x68, 0xfb, 0xa5, 0xc3, 0x6a, 0x7b, 0x3d, - 0xe7, 0xd8, 0x88, 0xf5, 0xf8, 0x09, 0xd4, 0xbe, 0x61, 0x3e, 0xf3, 0xd8, 0x70, 0xd6, 0xa3, 0x97, - 0x0c, 0x3d, 0x80, 0x15, 0xca, 0x9c, 0x04, 0x78, 0x27, 0x01, 0x9e, 0xbf, 0x78, 0xd6, 0x39, 0x67, - 0x0e, 0x31, 0x94, 0x1e, 0xeb, 0x50, 
0x8e, 0x45, 0xa8, 0x0e, 0xcb, 0xbd, 0xae, 0xa4, 0xa7, 0x64, - 0x2c, 0xf7, 0xba, 0xd8, 0x86, 0x55, 0x15, 0x27, 0xa3, 0xa9, 0x84, 0x1a, 0xb4, 0x0d, 0xab, 0x23, - 0x62, 0x79, 0x62, 0x14, 0x31, 0x16, 0xbd, 0xd0, 0x09, 0x94, 0x45, 0x94, 0x86, 0xa4, 0xaa, 0xda, - 0x7e, 0x27, 0x89, 0x9c, 0xcd, 0xcf, 0x48, 0xcc, 0xf0, 0x19, 0x34, 0xfa, 0xd1, 0x34, 0x7c, 0xce, - 0xa8, 0xb0, 0x5c, 0x9a, 0x36, 0xed, 0x18, 0xaa, 0x51, 0x81, 0xa6, 0xeb, 0xa8, 0x5a, 0x2a, 0x4f, - 0xeb, 0xf3, 0xbf, 0xef, 0x82, 0xca, 0x8b, 0xf7, 0xba, 0xdc, 0x80, 0xc8, 0xa4, 0xe7, 0x70, 0xbc, - 0x0b, 0x3b, 0x05, 0xce, 0x14, 0x9d, 0x78, 0x06, 0x7a, 0xc1, 0xd8, 0xc4, 0xb1, 0xbe, 0x03, 0x64, - 0xc7, 0x10, 0x39, 0xaf, 0x84, 0x8b, 0x98, 0xbe, 0x87, 0x49, 0x11, 0x89, 0xd7, 0xeb, 0x3d, 0x19, - 0x77, 0xec, 0x5c, 0x1d, 0x1c, 0xff, 0xa1, 0xc1, 0xc1, 0x0d, 0xa0, 0xe8, 0x18, 0x36, 0x93, 0x69, - 0x37, 0x55, 0x5d, 0xbd, 0x6e, 0x54, 0xb8, 0x81, 0x12, 0x55, 0x37, 0xd6, 0xa0, 0x8f, 0x60, 0x7b, - 0x3c, 0xe1, 0xc2, 0x74, 0xa9, 0xed, 0x4d, 0x9c, 0x2c, 0x66, 0x59, 0x62, 0xb6, 0x42, 0x6d, 0x4f, - 0x29, 0x53, 0xd4, 0x03, 0x58, 0xcf, 0xfc, 0xbf, 0xb8, 0xfb, 0x52, 0x0d, 0xf6, 0x8a, 0x51, 0x4f, - 0xc5, 0x17, 0xee, 0x4b, 0x82, 0x7f, 0x86, 0xdd, 0xc2, 0x6c, 0xa3, 0x01, 0xfd, 0x01, 0x36, 0xb3, - 0x9c, 0x29, 0x69, 0x4c, 0xda, 0xd1, 0x0d, 0x49, 0x53, 0x28, 0x03, 0xd9, 0xf9, 0x86, 0x71, 0xdc, - 0x85, 0xfb, 0x37, 0xc1, 0xa2, 0x3d, 0xa8, 0xe4, 0xc9, 0x4a, 0x05, 0xd8, 0x86, 0xf5, 0x08, 0x43, - 0x62, 0x9e, 0xfb, 0x0b, 0x9a, 0x7d, 0xef, 0x6a, 0xde, 0x39, 0x78, 0x51, 0x87, 0xcf, 0xa0, 0x71, - 0x9d, 0xf9, 0xdb, 0x8f, 0xf1, 0x10, 0x36, 0x52, 0x1f, 0x51, 0x8d, 0x17, 0x8b, 0xb8, 0xc6, 0x8b, - 0x72, 0x5e, 0x40, 0xf0, 0xaf, 0x25, 0xd8, 0xb9, 0x16, 0x81, 0x3e, 0x83, 0x5b, 0x84, 0x4e, 0x17, - 0xfc, 0x09, 0xf2, 0x88, 0xd6, 0x29, 0x9d, 0xf2, 0x53, 0x2a, 0x82, 0x99, 0x21, 0x91, 0xe8, 0x7d, - 0x58, 0x1d, 0xb3, 0x09, 0x15, 0x6a, 0x1c, 0xab, 0xed, 0x7a, 0xe2, 0xe3, 0x59, 0x28, 0x36, 0x22, - 0x2d, 0x3a, 0x4a, 0x37, 0x5d, 0x49, 0x1a, 0x6e, 0xe6, 0x36, 0xdd, 0x85, 0x4f, 0xec, 0x64, 0xdb, - 0xa1, 0x17, 0x50, 0xb5, 0x28, 0x65, 0xc2, 0x8a, 0xb7, 0x6e, 0x08, 0x79, 0x74, 0x83, 0xfc, 0x3a, - 0x29, 0x4a, 0xa5, 0x99, 0xf5, 0xa3, 0x3f, 0x81, 0x4a, 0x52, 0x00, 0xda, 0x80, 0xd2, 0x8f, 0x64, - 0x16, 0xed, 0xbc, 0xf0, 0x27, 0xda, 0x82, 0x95, 0xa9, 0xe5, 0x4d, 0x48, 0xb4, 0xf3, 0xd4, 0xe3, - 0x93, 0xe5, 0x8f, 0x35, 0xfd, 0x53, 0xd8, 0xc8, 0x7b, 0x7e, 0x1b, 0x3c, 0x1e, 0xc1, 0x8a, 0xe4, - 0x03, 0xbd, 0x07, 0xf5, 0xb4, 0xc9, 0xbe, 0x25, 0x46, 0x11, 0xfe, 0x76, 0x22, 0xed, 0x5b, 0x62, - 0x84, 0x76, 0xa1, 0x32, 0x62, 0x5c, 0x28, 0x8b, 0xe8, 0x66, 0x85, 0x82, 0x58, 0x19, 0x10, 0xcb, - 0x31, 0x19, 0xf5, 0xd4, 0x12, 0x2e, 0x1b, 0xe5, 0x50, 0xf0, 0x9c, 0x7a, 0x33, 0x1c, 0x00, 0xa4, - 0x84, 0xfe, 0x2f, 0xe1, 0xf6, 0xa1, 0xea, 0x93, 0x60, 0xec, 0x72, 0x2e, 0x7b, 0xa1, 0x0e, 0x64, - 0x56, 0xd4, 0xfe, 0x02, 0x6a, 0xea, 0x1a, 0x07, 0x92, 0x1f, 0xf4, 0x18, 0xca, 0xf1, 0x75, 0x46, - 0x8d, 0xa4, 0x69, 0xb9, 0x83, 0xad, 0xa7, 0xa3, 0xa2, 0x8e, 0xe4, 0x52, 0xfb, 0xb7, 0x12, 0xd4, - 0xb2, 0x07, 0x15, 0x7d, 0x05, 0xdb, 0x5f, 0x12, 0x51, 0xf4, 0xf1, 0x91, 0x03, 0xeb, 0x0b, 0x2f, - 0x32, 0x5e, 0x42, 0x1d, 0xa8, 0x65, 0x2f, 0xf0, 0x15, 0xfc, 0xbb, 0xc9, 0xbb, 0xe8, 0x50, 0xe3, - 0xa5, 0x0f, 0x35, 0x44, 0x64, 0x32, 0x05, 0x5b, 0x0a, 0x1d, 0x24, 0xe0, 0xeb, 0x37, 0xbf, 0x7e, - 0x7f, 0xb1, 0x51, 0x1c, 0x08, 0x75, 0xa0, 0x1c, 0x4f, 0x75, 0x86, 0xbc, 0xdc, 0xc6, 0xd1, 0x77, - 0x0a, 0x34, 0x89, 0x8b, 0xef, 0xe1, 0xce, 0x95, 0x23, 0x89, 0xee, 0x65, 0xe3, 0x17, 0x5e, 0x63, - 0x1d, 0x2f, 0x32, 0x89, 0xbd, 0x3f, 0xdd, 0x7b, 0xfd, 0xa6, 
0xa9, 0xfd, 0xf5, 0xa6, 0xb9, 0xf4, - 0xcb, 0xbc, 0xa9, 0xbd, 0x9e, 0x37, 0xb5, 0x3f, 0xe7, 0x4d, 0xed, 0x9f, 0x79, 0x53, 0x7b, 0xf5, - 0x6f, 0x73, 0x69, 0xb0, 0x2a, 0x3f, 0x1a, 0x1f, 0xfd, 0x17, 0x00, 0x00, 0xff, 0xff, 0x8b, 0x2d, - 0xfa, 0x93, 0x79, 0x0a, 0x00, 0x00, + // 1044 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x56, 0xdd, 0x6e, 0x1b, 0x45, + 0x14, 0xce, 0xc6, 0x4d, 0x62, 0x1f, 0xa7, 0xf9, 0x99, 0x84, 0xc8, 0xd9, 0x14, 0x37, 0x9d, 0x14, + 0x1a, 0xa4, 0xc4, 0x26, 0x2e, 0x6a, 0x2b, 0x2e, 0x10, 0x2e, 0x0e, 0x60, 0x42, 0xd3, 0x68, 0x43, + 0x85, 0x04, 0x08, 0x6b, 0xbc, 0x3b, 0xb1, 0x57, 0x59, 0xcf, 0x2c, 0x3b, 0x63, 0x4b, 0xae, 0x84, + 0xc4, 0x05, 0x0f, 0xd0, 0x77, 0x80, 0x57, 0xe0, 0x1d, 0x7a, 0xc9, 0x25, 0x57, 0x88, 0x9a, 0x17, + 0x41, 0x9e, 0xd9, 0x3f, 0x6d, 0x36, 0x56, 0x2a, 0x71, 0xe7, 0x39, 0xe7, 0x7c, 0xe7, 0xe7, 0x3b, + 0x67, 0xcf, 0x31, 0x94, 0x88, 0xef, 0xd6, 0xfc, 0x80, 0x4b, 0x8e, 0x96, 0x46, 0x47, 0x5d, 0x2a, + 0xc9, 0x91, 0x79, 0xd8, 0x73, 0x65, 0x7f, 0xd8, 0xad, 0xd9, 0x7c, 0x50, 0xef, 0xf1, 0x1e, 0xaf, + 0x2b, 0x7d, 0x77, 0x78, 0xa1, 0x5e, 0xea, 0xa1, 0x7e, 0x69, 0x1c, 0x7e, 0x65, 0xc0, 0x46, 0x8b, + 0x8e, 0x5c, 0x9b, 0x9e, 0x79, 0xc3, 0x9e, 0xcb, 0x9e, 0xfb, 0xd2, 0xe5, 0x4c, 0xa0, 0x03, 0x40, + 0x7e, 0x40, 0x3b, 0x42, 0x92, 0x40, 0x76, 0x02, 0xfa, 0xd3, 0xd0, 0x0d, 0xa8, 0x53, 0x31, 0x76, + 0x8d, 0xfd, 0xa2, 0xb5, 0xe6, 0x07, 0xf4, 0x7c, 0xaa, 0xb0, 0x42, 0x39, 0x3a, 0x01, 0xdc, 0xa3, + 0xb2, 0xe3, 0x07, 0xf4, 0x82, 0x06, 0x01, 0x75, 0x3a, 0xc4, 0xf3, 0xb8, 0x4d, 0xa6, 0xae, 0x3a, + 0x64, 0x44, 0x5c, 0x8f, 0x74, 0x3d, 0x5a, 0x99, 0x57, 0xe8, 0xbb, 0x3d, 0x2a, 0xcf, 0x22, 0xc3, + 0x66, 0x6c, 0xd7, 0x8c, 0xcc, 0xf0, 0xef, 0x06, 0xac, 0x5a, 0xb4, 0xe7, 0x0a, 0x49, 0x83, 0x69, + 0x04, 0x2a, 0x24, 0xaa, 0xc0, 0xd2, 0x88, 0x06, 0xc2, 0xe5, 0x4c, 0xe5, 0x50, 0xb2, 0xa2, 0x27, + 0x32, 0xa1, 0x48, 0x99, 0xe3, 0x73, 0x97, 0x49, 0x15, 0xa0, 0x64, 0xc5, 0x6f, 0xb4, 0x07, 0xb7, + 0x03, 0x2a, 0xf8, 0x30, 0xb0, 0x69, 0x87, 0x91, 0x01, 0xad, 0x14, 0x94, 0xc1, 0x72, 0x24, 0x3c, + 0x25, 0x03, 0x8a, 0x1e, 0xc1, 0x12, 0xd7, 0x45, 0x57, 0x6e, 0xed, 0x1a, 0xfb, 0xe5, 0xc6, 0x9d, + 0x5a, 0xc8, 0x65, 0x2d, 0x87, 0x18, 0x2b, 0x32, 0xc6, 0x4b, 0xb0, 0x70, 0x3c, 0xf0, 0xe5, 0x18, + 0x37, 0x61, 0xf3, 0x6b, 0x57, 0xc8, 0x26, 0x73, 0xbe, 0x25, 0xd2, 0xee, 0x5b, 0x54, 0xf8, 0x9c, + 0x09, 0x8a, 0x3e, 0x80, 0x25, 0x47, 0x39, 0x10, 0x15, 0x63, 0xb7, 0xb0, 0x5f, 0x6e, 0xac, 0x66, + 0x1c, 0x5b, 0x91, 0x1e, 0x3f, 0x86, 0xe5, 0x6f, 0xb8, 0xcf, 0x3d, 0xde, 0x1b, 0xb7, 0xd9, 0x05, + 0x47, 0x0f, 0x60, 0x81, 0x71, 0x27, 0x06, 0xae, 0xc7, 0xc0, 0xd3, 0x17, 0xcf, 0x9a, 0xa7, 0xdc, + 0xa1, 0x96, 0xd6, 0x63, 0x13, 0x8a, 0x91, 0x08, 0xad, 0xc0, 0x7c, 0xbb, 0xa5, 0xe8, 0x29, 0x58, + 0xf3, 0xed, 0x16, 0xb6, 0x61, 0x51, 0xc7, 0x49, 0x69, 0x4a, 0x53, 0x0d, 0xda, 0x82, 0xc5, 0x3e, + 0x25, 0x9e, 0xec, 0x87, 0x8c, 0x85, 0x2f, 0x74, 0x04, 0x45, 0x19, 0xa6, 0xa1, 0xa8, 0x2a, 0x37, + 0xde, 0x89, 0x23, 0xa7, 0xf3, 0xb3, 0x62, 0x33, 0x7c, 0x02, 0x95, 0xb3, 0x70, 0x1a, 0x3e, 0xe3, + 0x4c, 0x12, 0x97, 0x25, 0x4d, 0xab, 0x43, 0x39, 0x2c, 0xb0, 0xe3, 0x3a, 0xba, 0x96, 0xd2, 0xd3, + 0x95, 0xc9, 0xdf, 0x77, 0x41, 0xe7, 0x25, 0xda, 0x2d, 0x61, 0x41, 0x68, 0xd2, 0x76, 0x04, 0xde, + 0x81, 0xed, 0x1c, 0x67, 0x9a, 0x4e, 0x3c, 0x06, 0x33, 0x67, 0x6c, 0xa2, 0x58, 0xdf, 0x03, 0xb2, + 0x23, 0x88, 0x9a, 0x57, 0x2a, 0x64, 0x44, 0xdf, 0x41, 0x5c, 0x44, 0xec, 0xf5, 0x7a, 0x4f, 0xd6, + 0xba, 0x9d, 0xa9, 0x43, 0xe0, 0x3f, 0x0c, 0xd8, 0xbb, 0x01, 0x14, 0xd5, 0x61, 0x23, 0x9e, 0xf6, + 
0x8e, 0xae, 0xab, 0xdd, 0x0a, 0x0b, 0xb7, 0x50, 0xac, 0x6a, 0x45, 0x1a, 0xf4, 0x11, 0x6c, 0x0d, + 0x86, 0x42, 0x76, 0x5c, 0x66, 0x7b, 0x43, 0x27, 0x8d, 0x99, 0x57, 0x98, 0xcd, 0xa9, 0xb6, 0xad, + 0x95, 0x09, 0xea, 0x01, 0xac, 0xa6, 0xbe, 0x2f, 0xe1, 0xbe, 0xd4, 0x83, 0xbd, 0x60, 0xad, 0x24, + 0xe2, 0x73, 0xf7, 0x25, 0xc5, 0x3f, 0xc3, 0x4e, 0x6e, 0xb6, 0xe1, 0x80, 0xfe, 0x08, 0x1b, 0x69, + 0xce, 0xb4, 0x34, 0x22, 0xed, 0xf0, 0x86, 0xa4, 0x69, 0x94, 0x85, 0xec, 0x6c, 0xc3, 0x04, 0x6e, + 0xc1, 0xfd, 0x9b, 0x60, 0xd1, 0x1d, 0x28, 0x65, 0xc9, 0x4a, 0x04, 0xd8, 0x86, 0xd5, 0x10, 0x43, + 0x23, 0x9e, 0xcf, 0x66, 0x34, 0xfb, 0xde, 0xd5, 0xbc, 0x33, 0xf0, 0xbc, 0x0e, 0x9f, 0x40, 0xe5, + 0x3a, 0xf3, 0xb7, 0x1f, 0xe3, 0x1e, 0xac, 0x25, 0x3e, 0xc2, 0x1a, 0xcf, 0x67, 0x71, 0x8d, 0x67, + 0xe5, 0x3c, 0x83, 0xe0, 0x5f, 0x0b, 0xb0, 0x7d, 0x2d, 0x02, 0x7d, 0x0a, 0xb7, 0x28, 0x1b, 0xcd, + 0xf8, 0x08, 0xb2, 0x88, 0xda, 0x31, 0x1b, 0x89, 0x63, 0x26, 0x83, 0xb1, 0xa5, 0x90, 0xe8, 0x7d, + 0x58, 0x1c, 0xf0, 0x21, 0x93, 0x7a, 0x1c, 0xcb, 0x8d, 0x95, 0xd8, 0xc7, 0xb3, 0xa9, 0xd8, 0x0a, + 0xb5, 0xe8, 0x30, 0xd9, 0x74, 0x05, 0x65, 0xb8, 0x91, 0xd9, 0x74, 0xe7, 0x3e, 0xb5, 0xe3, 0x6d, + 0x87, 0x5e, 0x40, 0x99, 0x30, 0xc6, 0x25, 0x89, 0xb6, 0xee, 0x14, 0xf2, 0xf0, 0x06, 0xf9, 0x35, + 0x13, 0x94, 0x4e, 0x33, 0xed, 0xc7, 0x7c, 0x0c, 0xa5, 0xb8, 0x00, 0xb4, 0x06, 0x85, 0x4b, 0x3a, + 0x0e, 0x77, 0xde, 0xf4, 0x27, 0xda, 0x84, 0x85, 0x11, 0xf1, 0x86, 0x34, 0xdc, 0x79, 0xfa, 0xf1, + 0xf1, 0xfc, 0x13, 0xc3, 0xfc, 0x04, 0xd6, 0xb2, 0x9e, 0xdf, 0x06, 0x8f, 0xfb, 0xb0, 0xa0, 0xf8, + 0x40, 0xef, 0xc1, 0x4a, 0xd2, 0x64, 0x9f, 0xc8, 0x7e, 0x88, 0xbf, 0x1d, 0x4b, 0xcf, 0x88, 0xec, + 0xa3, 0x1d, 0x28, 0xf5, 0xb9, 0x90, 0xda, 0x22, 0xbc, 0x59, 0x53, 0x41, 0xa4, 0x0c, 0x28, 0x71, + 0x3a, 0x9c, 0x79, 0x7a, 0x09, 0x17, 0xad, 0xe2, 0x54, 0xf0, 0x9c, 0x79, 0x63, 0x1c, 0x00, 0x24, + 0x84, 0xfe, 0x2f, 0xe1, 0x76, 0xa1, 0xec, 0xd3, 0x60, 0xe0, 0x0a, 0xa1, 0x7a, 0xa1, 0x0f, 0x64, + 0x5a, 0xd4, 0xf8, 0x1c, 0x96, 0xf5, 0x35, 0x0e, 0x14, 0x3f, 0xe8, 0x11, 0x14, 0xa3, 0xeb, 0x8c, + 0x2a, 0x71, 0xd3, 0x32, 0x07, 0xdb, 0x4c, 0x46, 0x45, 0x1f, 0xc9, 0xb9, 0xc6, 0x6f, 0x05, 0x58, + 0x4e, 0x1f, 0x54, 0xf4, 0x25, 0x6c, 0x7d, 0x41, 0x65, 0xde, 0x9f, 0x8f, 0x0c, 0xd8, 0x9c, 0x79, + 0x91, 0xf1, 0x1c, 0x6a, 0xc2, 0x72, 0xfa, 0x02, 0x5f, 0xc1, 0xbf, 0x1b, 0xbf, 0xf3, 0x0e, 0x35, + 0x9e, 0xfb, 0xd0, 0x40, 0x54, 0x25, 0x93, 0xb3, 0xa5, 0xd0, 0x5e, 0x0c, 0xbe, 0x7e, 0xf3, 0x9b, + 0xf7, 0x67, 0x1b, 0x45, 0x81, 0x50, 0x13, 0x8a, 0xd1, 0x54, 0xa7, 0xc8, 0xcb, 0x6c, 0x1c, 0x73, + 0x3b, 0x47, 0x13, 0xbb, 0xf8, 0x01, 0xd6, 0xaf, 0x1c, 0x49, 0x74, 0x2f, 0x1d, 0x3f, 0xf7, 0x1a, + 0x9b, 0x78, 0x96, 0x49, 0xe4, 0xfd, 0xe9, 0x57, 0xaf, 0xdf, 0x54, 0x8d, 0xbf, 0xde, 0x54, 0xe7, + 0x7e, 0x99, 0x54, 0x8d, 0xd7, 0x93, 0xaa, 0xf1, 0xe7, 0xa4, 0x6a, 0xfc, 0x33, 0xa9, 0x1a, 0xaf, + 0xfe, 0xad, 0xce, 0x7d, 0x77, 0x70, 0xf9, 0x44, 0xd4, 0x5c, 0x5e, 0xbf, 0x1c, 0x76, 0xa9, 0x47, + 0x65, 0xdd, 0xbf, 0xec, 0xd5, 0x89, 0xef, 0x8a, 0xba, 0xfe, 0xb4, 0x7d, 0xd5, 0x97, 0x7a, 0x18, + 0xa7, 0xbb, 0xa8, 0xfe, 0x62, 0x3e, 0xfc, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xe2, 0xf2, 0x09, 0x79, + 0xa7, 0x0a, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
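The typed client vendored above (clientset_generated/clientset/typed/apiregistration/v1) is a standard client-gen client, so it is consumed like any other Kubernetes typed client. The following is a minimal sketch, not part of the patch, assuming an in-cluster rest.Config and using only the constructors and methods defined in the vendored files above; the package alias apiregv1 and the error handling are illustrative:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"

	apiregv1 "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1"
)

func main() {
	// Assumes the process runs in-cluster; rest.InClusterConfig is the
	// usual way to obtain a rest.Config in that case.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}

	// NewForConfig and APIServices() are defined in the vendored
	// apiregistration_client.go above.
	client, err := apiregv1.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// List comes from the generated APIServiceInterface in apiservice.go;
	// APIService is cluster-scoped, hence no namespace argument.
	list, err := client.APIServices().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for i := range list.Items {
		fmt.Println(list.Items[i].Name)
	}
}

Registering the aggregator types into another scheme works the same way as the AddToScheme example shown in the generated scheme/register.go doc comment above.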
diff --git a/vendor/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/api.proto b/vendor/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/api.proto index d22da05b2..f44e7cebd 100644 --- a/vendor/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/api.proto +++ b/vendor/k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1/api.proto @@ -1,7 +1,8 @@ -// To regenerate api.pb.go run hack/update-generated-device-plugin.sh +// To regenerate api.pb.go run `hack/update-codegen.sh protobindings` syntax = "proto3"; package v1beta1; +option go_package = "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"; import "github.com/gogo/protobuf/gogoproto/gogo.proto"; diff --git a/vendor/modules.txt b/vendor/modules.txt index 47e092460..ff446fb7a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -22,15 +22,21 @@ github.com/Mellanox/sriovnet/pkg/utils/netlinkops # github.com/StackExchange/wmi v1.2.1 ## explicit; go 1.13 github.com/StackExchange/wmi -# github.com/ajeddeloh/go-json v0.0.0-20170920214419-6a2fe990e083 +# github.com/ajeddeloh/go-json v0.0.0-20200220154158-5ae607161559 ## explicit github.com/ajeddeloh/go-json +# github.com/aws/aws-sdk-go v1.44.204 +## explicit; go 1.11 +github.com/aws/aws-sdk-go/aws/arn # github.com/beorn7/perks v1.0.1 ## explicit; go 1.11 github.com/beorn7/perks/quantile # github.com/blang/semver v3.5.1+incompatible ## explicit github.com/blang/semver +# github.com/blang/semver/v4 v4.0.0 +## explicit; go 1.14 +github.com/blang/semver/v4 # github.com/cenkalti/backoff v2.2.1+incompatible ## explicit github.com/cenkalti/backoff @@ -43,15 +49,15 @@ github.com/chai2010/gettext-go github.com/chai2010/gettext-go/mo github.com/chai2010/gettext-go/plural github.com/chai2010/gettext-go/po -# github.com/clarketm/json v1.14.1 +# github.com/clarketm/json v1.17.1 ## explicit github.com/clarketm/json # github.com/coreos/fcct v0.5.0 ## explicit; go 1.12 github.com/coreos/fcct/base/v0_1 github.com/coreos/fcct/translate -# github.com/coreos/go-json v0.0.0-20211020211907-c63f628265de -## explicit; go 1.15 +# github.com/coreos/go-json v0.0.0-20230131223807-18775e0fb4fb +## explicit; go 1.18 github.com/coreos/go-json # github.com/coreos/go-semver v0.3.1 ## explicit; go 1.8 @@ -62,11 +68,13 @@ github.com/coreos/go-systemd/unit # github.com/coreos/go-systemd/v22 v22.5.0 ## explicit; go 1.12 github.com/coreos/go-systemd/v22/unit -# github.com/coreos/ign-converter v0.0.0-20201123214124-8dac862888aa -## explicit; go 1.13 +# github.com/coreos/ign-converter v0.0.0-20230417193809-cee89ea7d8ff +## explicit; go 1.18 github.com/coreos/ign-converter/translate/v23tov30 github.com/coreos/ign-converter/translate/v32tov22 github.com/coreos/ign-converter/translate/v32tov31 +github.com/coreos/ign-converter/translate/v33tov32 +github.com/coreos/ign-converter/translate/v34tov33 github.com/coreos/ign-converter/util # github.com/coreos/ignition v0.35.0 ## explicit @@ -88,10 +96,11 @@ github.com/coreos/ignition/config/validate github.com/coreos/ignition/config/validate/astjson github.com/coreos/ignition/config/validate/astnode github.com/coreos/ignition/config/validate/report -# github.com/coreos/ignition/v2 v2.14.0 -## explicit; go 1.15 +# github.com/coreos/ignition/v2 v2.15.0 +## explicit; go 1.18 github.com/coreos/ignition/v2/config/merge github.com/coreos/ignition/v2/config/shared/errors +github.com/coreos/ignition/v2/config/shared/parse github.com/coreos/ignition/v2/config/shared/validations github.com/coreos/ignition/v2/config/translate github.com/coreos/ignition/v2/config/util @@ -103,15 +112,21 @@ 
github.com/coreos/ignition/v2/config/v3_1/types github.com/coreos/ignition/v2/config/v3_2 github.com/coreos/ignition/v2/config/v3_2/translate github.com/coreos/ignition/v2/config/v3_2/types +github.com/coreos/ignition/v2/config/v3_3 +github.com/coreos/ignition/v2/config/v3_3/translate +github.com/coreos/ignition/v2/config/v3_3/types +github.com/coreos/ignition/v2/config/v3_4 +github.com/coreos/ignition/v2/config/v3_4/translate +github.com/coreos/ignition/v2/config/v3_4/types github.com/coreos/ignition/v2/config/validate -# github.com/coreos/vcontext v0.0.0-20211021162308-f1dbbca7bef4 -## explicit; go 1.15 +# github.com/coreos/vcontext v0.0.0-20230201181013-d72178a18687 +## explicit; go 1.18 github.com/coreos/vcontext/json github.com/coreos/vcontext/path github.com/coreos/vcontext/report github.com/coreos/vcontext/tree github.com/coreos/vcontext/validate -# github.com/davecgh/go-spew v1.1.1 +# github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ## explicit github.com/davecgh/go-spew/spew # github.com/emicklei/go-restful/v3 v3.11.0 @@ -127,6 +142,8 @@ github.com/evanphx/json-patch/v5 # github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d ## explicit github.com/exponent-io/jsonpath +# github.com/frankban/quicktest v1.14.4 +## explicit; go 1.13 # github.com/fsnotify/fsnotify v1.7.0 ## explicit; go 1.17 github.com/fsnotify/fsnotify @@ -287,8 +304,6 @@ github.com/liggitt/tabwriter github.com/mailru/easyjson/buffer github.com/mailru/easyjson/jlexer github.com/mailru/easyjson/jwriter -# github.com/mattn/go-isatty v0.0.17 -## explicit; go 1.15 # github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 ## explicit; go 1.19 github.com/matttproud/golang_protobuf_extensions/v2/pbutil @@ -363,23 +378,102 @@ github.com/onsi/gomega/types # github.com/openshift-kni/k8sreporter v1.0.4 ## explicit; go 1.20 github.com/openshift-kni/k8sreporter -# github.com/openshift/api v0.0.0-20221220162201-efeef9d83325 -## explicit; go 1.18 +# github.com/openshift/api v0.0.0-20230807132801-600991d550ac +## explicit; go 1.20 +github.com/openshift/api +github.com/openshift/api/apiserver +github.com/openshift/api/apiserver/v1 +github.com/openshift/api/apps +github.com/openshift/api/apps/v1 +github.com/openshift/api/authorization +github.com/openshift/api/authorization/v1 +github.com/openshift/api/build +github.com/openshift/api/build/v1 +github.com/openshift/api/cloudnetwork +github.com/openshift/api/cloudnetwork/v1 +github.com/openshift/api/config github.com/openshift/api/config/v1 +github.com/openshift/api/config/v1alpha1 +github.com/openshift/api/console +github.com/openshift/api/console/v1 +github.com/openshift/api/console/v1alpha1 +github.com/openshift/api/helm +github.com/openshift/api/helm/v1beta1 +github.com/openshift/api/image +github.com/openshift/api/image/docker10 +github.com/openshift/api/image/dockerpre012 +github.com/openshift/api/image/v1 +github.com/openshift/api/imageregistry +github.com/openshift/api/imageregistry/v1 +github.com/openshift/api/kubecontrolplane +github.com/openshift/api/kubecontrolplane/v1 +github.com/openshift/api/legacyconfig/v1 +github.com/openshift/api/machine +github.com/openshift/api/machine/v1 +github.com/openshift/api/machine/v1alpha1 +github.com/openshift/api/machine/v1beta1 +github.com/openshift/api/monitoring +github.com/openshift/api/monitoring/v1alpha1 +github.com/openshift/api/network +github.com/openshift/api/network/v1 +github.com/openshift/api/networkoperator +github.com/openshift/api/networkoperator/v1 +github.com/openshift/api/oauth 
+github.com/openshift/api/oauth/v1 +github.com/openshift/api/openshiftcontrolplane +github.com/openshift/api/openshiftcontrolplane/v1 +github.com/openshift/api/operator github.com/openshift/api/operator/v1 github.com/openshift/api/operator/v1alpha1 -# github.com/openshift/client-go v0.0.0-20220831193253-4950ae70c8ea -## explicit; go 1.18 +github.com/openshift/api/operatorcontrolplane +github.com/openshift/api/operatorcontrolplane/v1alpha1 +github.com/openshift/api/osin +github.com/openshift/api/osin/v1 +github.com/openshift/api/pkg/serialization +github.com/openshift/api/project +github.com/openshift/api/project/v1 +github.com/openshift/api/quota +github.com/openshift/api/quota/v1 +github.com/openshift/api/route +github.com/openshift/api/route/v1 +github.com/openshift/api/samples +github.com/openshift/api/samples/v1 +github.com/openshift/api/security +github.com/openshift/api/security/v1 +github.com/openshift/api/servicecertsigner +github.com/openshift/api/servicecertsigner/v1alpha1 +github.com/openshift/api/sharedresource +github.com/openshift/api/sharedresource/v1alpha1 +github.com/openshift/api/template +github.com/openshift/api/template/v1 +github.com/openshift/api/user +github.com/openshift/api/user/v1 +# github.com/openshift/client-go v0.0.0-20230607134213-3cd0021bbee3 +## explicit; go 1.20 +github.com/openshift/client-go/build/applyconfigurations/build/v1 +github.com/openshift/client-go/build/applyconfigurations/internal +github.com/openshift/client-go/build/clientset/versioned +github.com/openshift/client-go/build/clientset/versioned/scheme +github.com/openshift/client-go/build/clientset/versioned/typed/build/v1 github.com/openshift/client-go/config/applyconfigurations/config/v1 +github.com/openshift/client-go/config/applyconfigurations/config/v1alpha1 github.com/openshift/client-go/config/applyconfigurations/internal github.com/openshift/client-go/config/clientset/versioned github.com/openshift/client-go/config/clientset/versioned/scheme github.com/openshift/client-go/config/clientset/versioned/typed/config/v1 +github.com/openshift/client-go/config/clientset/versioned/typed/config/v1alpha1 github.com/openshift/client-go/config/informers/externalversions github.com/openshift/client-go/config/informers/externalversions/config github.com/openshift/client-go/config/informers/externalversions/config/v1 +github.com/openshift/client-go/config/informers/externalversions/config/v1alpha1 github.com/openshift/client-go/config/informers/externalversions/internalinterfaces github.com/openshift/client-go/config/listers/config/v1 +github.com/openshift/client-go/config/listers/config/v1alpha1 +github.com/openshift/client-go/image/applyconfigurations/image/v1 +github.com/openshift/client-go/image/applyconfigurations/internal +github.com/openshift/client-go/image/clientset/versioned +github.com/openshift/client-go/image/clientset/versioned/scheme +github.com/openshift/client-go/image/clientset/versioned/typed/image/v1 github.com/openshift/client-go/operator/applyconfigurations/internal github.com/openshift/client-go/operator/applyconfigurations/operator/v1 github.com/openshift/client-go/operator/applyconfigurations/operator/v1alpha1 @@ -394,8 +488,23 @@ github.com/openshift/client-go/operator/informers/externalversions/operator/v1 github.com/openshift/client-go/operator/informers/externalversions/operator/v1alpha1 github.com/openshift/client-go/operator/listers/operator/v1 github.com/openshift/client-go/operator/listers/operator/v1alpha1 -# github.com/openshift/machine-config-operator 
v0.0.1-0.20230118083703-fc27a2bdaa85 -## explicit; go 1.18 +# github.com/openshift/library-go v0.0.0-20231020125025-211b32f1a1f2 +## explicit; go 1.20 +github.com/openshift/library-go/pkg/controller/factory +github.com/openshift/library-go/pkg/crypto +github.com/openshift/library-go/pkg/operator/condition +github.com/openshift/library-go/pkg/operator/configobserver +github.com/openshift/library-go/pkg/operator/configobserver/featuregates +github.com/openshift/library-go/pkg/operator/events +github.com/openshift/library-go/pkg/operator/management +github.com/openshift/library-go/pkg/operator/resource/resourceapply +github.com/openshift/library-go/pkg/operator/resource/resourcehelper +github.com/openshift/library-go/pkg/operator/resource/resourcemerge +github.com/openshift/library-go/pkg/operator/resource/resourceread +github.com/openshift/library-go/pkg/operator/resourcesynccontroller +github.com/openshift/library-go/pkg/operator/v1helpers +# github.com/openshift/machine-config-operator v0.0.1-0.20231024085435-7e1fb719c1ba +## explicit; go 1.20 github.com/openshift/machine-config-operator/internal/clients github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1 github.com/openshift/machine-config-operator/pkg/controller/common @@ -408,6 +517,7 @@ github.com/openshift/machine-config-operator/pkg/generated/informers/externalver github.com/openshift/machine-config-operator/pkg/generated/informers/externalversions/machineconfiguration.openshift.io github.com/openshift/machine-config-operator/pkg/generated/informers/externalversions/machineconfiguration.openshift.io/v1 github.com/openshift/machine-config-operator/pkg/generated/listers/machineconfiguration.openshift.io/v1 +github.com/openshift/machine-config-operator/pkg/version # github.com/peterbourgon/diskv v2.0.1+incompatible ## explicit github.com/peterbourgon/diskv @@ -436,6 +546,9 @@ github.com/prometheus/common/model github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util +# github.com/robfig/cron v1.2.0 +## explicit +github.com/robfig/cron # github.com/russross/blackfriday/v2 v2.1.0 ## explicit github.com/russross/blackfriday/v2 @@ -834,6 +947,9 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect +# k8s.io/apiserver v0.28.3 +## explicit; go 1.20 +k8s.io/apiserver/pkg/authentication/user # k8s.io/cli-runtime v0.28.3 ## explicit; go 1.20 k8s.io/cli-runtime/pkg/genericclioptions @@ -1209,6 +1325,9 @@ k8s.io/code-generator/third_party/forked/golang/reflect ## explicit; go 1.20 k8s.io/component-base/config k8s.io/component-base/config/v1alpha1 +k8s.io/component-base/metrics +k8s.io/component-base/metrics/legacyregistry +k8s.io/component-base/metrics/prometheusextension k8s.io/component-base/version # k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 ## explicit; go 1.13 @@ -1230,6 +1349,13 @@ k8s.io/klog/v2/internal/clock k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity +# k8s.io/kube-aggregator v0.27.4 +## explicit; go 1.20 +k8s.io/kube-aggregator/pkg/apis/apiregistration +k8s.io/kube-aggregator/pkg/apis/apiregistration/v1 +k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1 +k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme +k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/typed/apiregistration/v1 # k8s.io/kube-openapi 
v0.0.0-20231010175941-2dd684a91f00 ## explicit; go 1.19 k8s.io/kube-openapi/cmd/openapi-gen/args @@ -1257,8 +1383,8 @@ k8s.io/kubectl/pkg/util/openapi k8s.io/kubectl/pkg/util/templates k8s.io/kubectl/pkg/util/term k8s.io/kubectl/pkg/validation -# k8s.io/kubelet v0.25.1 -## explicit; go 1.19 +# k8s.io/kubelet v0.27.7 +## explicit; go 1.20 k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1 # k8s.io/utils v0.0.0-20230726121419-3b25d923346b ## explicit; go 1.18 @@ -1330,6 +1456,12 @@ sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics ## explicit; go 1.18 sigs.k8s.io/json sigs.k8s.io/json/internal/golang/encoding/json +# sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 +## explicit; go 1.20 +sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1 +sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset +sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme +sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1 # sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 ## explicit; go 1.19 sigs.k8s.io/kustomize/api/filters/annotations diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/LICENSE b/vendor/sigs.k8s.io/kube-storage-version-migrator/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/doc.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/doc.go new file mode 100644 index 000000000..da6d19a24 --- /dev/null +++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package + +// +groupName=migration.k8s.io +package v1alpha1 diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/register.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/register.go new file mode 100644 index 000000000..f400f747e --- /dev/null +++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/register.go @@ -0,0 +1,54 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "migration.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &StorageVersionMigration{}, + &StorageVersionMigrationList{}, + &StorageState{}, + &StorageStateList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/types.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/types.go new file mode 100644 index 000000000..427350b1e --- /dev/null +++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/types.go @@ -0,0 +1,186 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +genclient:nonNamespaced + +// StorageVersionMigration represents a migration of stored data to the latest +// storage version. +type StorageVersionMigration struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // Specification of the migration. + // +optional + Spec StorageVersionMigrationSpec `json:"spec,omitempty"` + // Status of the migration. + // +optional + Status StorageVersionMigrationStatus `json:"status,omitempty"` +} + +// The names of the group, the version, and the resource. +type GroupVersionResource struct { + // The name of the group. + Group string `json:"group,omitempty"` + // The name of the version. + Version string `json:"version,omitempty"` + // The name of the resource. + Resource string `json:"resource,omitempty"` +} + +// Spec of the storage version migration. +type StorageVersionMigrationSpec struct { + // The resource that is being migrated. The migrator sends requests to + // the endpoint serving the resource. + // Immutable. + Resource GroupVersionResource `json:"resource"` + // The token used in the list options to get the next chunk of objects + // to migrate. When the .status.conditions indicates the migration is + // "Running", users can use this token to check the progress of the + // migration. + // +optional + ContinueToken string `json:"continueToken,omitempty"` + // TODO: consider recording the storage version hash when the migration + // is created. It can avoid races. +} + +type MigrationConditionType string + +const ( + // Indicates that the migration is running. + MigrationRunning MigrationConditionType = "Running" + // Indicates that the migration has completed successfully. + MigrationSucceeded MigrationConditionType = "Succeeded" + // Indicates that the migration has failed. + MigrationFailed MigrationConditionType = "Failed" +) + +// Describes the state of a migration at a certain point. +type MigrationCondition struct { + // Type of the condition. + Type MigrationConditionType `json:"type"` + // Status of the condition, one of True, False, Unknown. + Status corev1.ConditionStatus `json:"status"` + // The last time this condition was updated. + // +optional + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty"` +} + +// Status of the storage version migration. +type StorageVersionMigrationStatus struct { + // The latest available observations of the migration's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []MigrationCondition `json:"conditions,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StorageVersionMigrationList is a collection of storage version migrations. 
+type StorageVersionMigrationList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + // Items is the list of StorageVersionMigration + Items []StorageVersionMigration `json:"items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +genclient:nonNamespaced + +// The state of the storage of a specific resource. +type StorageState struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // Specification of the storage state. + // +optional + Spec StorageStateSpec `json:"spec,omitempty"` + // Status of the storage state. + // +optional + Status StorageStateStatus `json:"status,omitempty"` +} + +// The names of the group and the resource. +type GroupResource struct { + // The name of the group. + Group string `json:"group,omitempty"` + // The name of the resource. + Resource string `json:"resource,omitempty"` +} + +// Specification of the storage state. +type StorageStateSpec struct { + // The resource this storageState is about. + Resource GroupResource `json:"resource,omitempty"` +} + +// Unknown is a valid value in persistedStorageVersionHashes. +const Unknown = "Unknown" + +// Status of the storage state. +type StorageStateStatus struct { + // The hash values of storage versions that persisted instances of + // spec.resource might still be encoded in. + // "Unknown" is a valid value in the list, and is the default value. + // It is not safe to upgrade or downgrade to an apiserver binary that does not + // support all versions listed in this field, or if "Unknown" is listed. + // Once the storage version migration for this resource has completed, the + // value of this field is refined to only contain the + // currentStorageVersionHash. + // Once the apiserver has changed the storage version, the new storage version + // is appended to the list. + // +optional + PersistedStorageVersionHashes []string `json:"persistedStorageVersionHashes,omitempty"` + // The hash value of the current storage version, as shown in the discovery + // document served by the API server. + // Storage Version is the version to which objects are converted to + // before persisted. + // +optional + CurrentStorageVersionHash string `json:"currentStorageVersionHash,omitempty"` + // LastHeartbeatTime is the last time the storage migration triggering + // controller checks the storage version hash of this resource in the + // discovery document and updates this field. + // +optional + LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StorageStateList is a collection of storage state. +type StorageStateList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + // Items is the list of StorageState + Items []StorageState `json:"items"` +} diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 000000000..da9613aef --- /dev/null +++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,276 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupResource) DeepCopyInto(out *GroupResource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupResource. +func (in *GroupResource) DeepCopy() *GroupResource { + if in == nil { + return nil + } + out := new(GroupResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupVersionResource) DeepCopyInto(out *GroupVersionResource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersionResource. +func (in *GroupVersionResource) DeepCopy() *GroupVersionResource { + if in == nil { + return nil + } + out := new(GroupVersionResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MigrationCondition) DeepCopyInto(out *MigrationCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MigrationCondition. +func (in *MigrationCondition) DeepCopy() *MigrationCondition { + if in == nil { + return nil + } + out := new(MigrationCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageState) DeepCopyInto(out *StorageState) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageState. +func (in *StorageState) DeepCopy() *StorageState { + if in == nil { + return nil + } + out := new(StorageState) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageState) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageStateList) DeepCopyInto(out *StorageStateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageState, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageStateList. +func (in *StorageStateList) DeepCopy() *StorageStateList { + if in == nil { + return nil + } + out := new(StorageStateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageStateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageStateSpec) DeepCopyInto(out *StorageStateSpec) { + *out = *in + out.Resource = in.Resource + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageStateSpec. +func (in *StorageStateSpec) DeepCopy() *StorageStateSpec { + if in == nil { + return nil + } + out := new(StorageStateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageStateStatus) DeepCopyInto(out *StorageStateStatus) { + *out = *in + if in.PersistedStorageVersionHashes != nil { + in, out := &in.PersistedStorageVersionHashes, &out.PersistedStorageVersionHashes + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageStateStatus. +func (in *StorageStateStatus) DeepCopy() *StorageStateStatus { + if in == nil { + return nil + } + out := new(StorageStateStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageVersionMigration) DeepCopyInto(out *StorageVersionMigration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionMigration. +func (in *StorageVersionMigration) DeepCopy() *StorageVersionMigration { + if in == nil { + return nil + } + out := new(StorageVersionMigration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageVersionMigration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageVersionMigrationList) DeepCopyInto(out *StorageVersionMigrationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageVersionMigration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionMigrationList. +func (in *StorageVersionMigrationList) DeepCopy() *StorageVersionMigrationList { + if in == nil { + return nil + } + out := new(StorageVersionMigrationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageVersionMigrationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageVersionMigrationSpec) DeepCopyInto(out *StorageVersionMigrationSpec) { + *out = *in + out.Resource = in.Resource + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionMigrationSpec. +func (in *StorageVersionMigrationSpec) DeepCopy() *StorageVersionMigrationSpec { + if in == nil { + return nil + } + out := new(StorageVersionMigrationSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageVersionMigrationStatus) DeepCopyInto(out *StorageVersionMigrationStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]MigrationCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageVersionMigrationStatus. +func (in *StorageVersionMigrationStatus) DeepCopy() *StorageVersionMigrationStatus { + if in == nil { + return nil + } + out := new(StorageVersionMigrationStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/clientset.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/clientset.go new file mode 100644 index 000000000..c92124b7a --- /dev/null +++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/clientset.go @@ -0,0 +1,120 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package clientset + +import ( + "fmt" + "net/http" + + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" + migrationv1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + MigrationV1alpha1() migrationv1alpha1.MigrationV1alpha1Interface +} + +// Clientset contains the clients for groups. +type Clientset struct { + *discovery.DiscoveryClient + migrationV1alpha1 *migrationv1alpha1.MigrationV1alpha1Client +} + +// MigrationV1alpha1 retrieves the MigrationV1alpha1Client +func (c *Clientset) MigrationV1alpha1() migrationv1alpha1.MigrationV1alpha1Interface { + return c.migrationV1alpha1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.migrationV1alpha1, err = migrationv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. 
+func New(c rest.Interface) *Clientset { + var cs Clientset + cs.migrationV1alpha1 = migrationv1alpha1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/doc.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/doc.go new file mode 100644 index 000000000..ee865e56d --- /dev/null +++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package clientset diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme/doc.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme/doc.go new file mode 100644 index 000000000..7dc375616 --- /dev/null +++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme/register.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme/register.go new file mode 100644 index 000000000..32bd297c5 --- /dev/null +++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme/register.go @@ -0,0 +1,56 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package scheme + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + migrationv1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + migrationv1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/doc.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/doc.go new file mode 100644 index 000000000..df51baa4d --- /dev/null +++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/generated_expansion.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/generated_expansion.go new file mode 100644 index 000000000..3ce4f5753 --- /dev/null +++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +type StorageStateExpansion interface{} + +type StorageVersionMigrationExpansion interface{} diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/migration_client.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/migration_client.go new file mode 100644 index 000000000..9deb423ce --- /dev/null +++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/migration_client.go @@ -0,0 +1,112 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "net/http" + + rest "k8s.io/client-go/rest" + v1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1" + "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme" +) + +type MigrationV1alpha1Interface interface { + RESTClient() rest.Interface + StorageStatesGetter + StorageVersionMigrationsGetter +} + +// MigrationV1alpha1Client is used to interact with features provided by the migration.k8s.io group. +type MigrationV1alpha1Client struct { + restClient rest.Interface +} + +func (c *MigrationV1alpha1Client) StorageStates() StorageStateInterface { + return newStorageStates(c) +} + +func (c *MigrationV1alpha1Client) StorageVersionMigrations() StorageVersionMigrationInterface { + return newStorageVersionMigrations(c) +} + +// NewForConfig creates a new MigrationV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*MigrationV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new MigrationV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*MigrationV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &MigrationV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new MigrationV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *MigrationV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new MigrationV1alpha1Client for the given RESTClient. 
+func New(c rest.Interface) *MigrationV1alpha1Client { + return &MigrationV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *MigrationV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/storagestate.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/storagestate.go new file mode 100644 index 000000000..8345b3619 --- /dev/null +++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/storagestate.go @@ -0,0 +1,184 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1" + scheme "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme" +) + +// StorageStatesGetter has a method to return a StorageStateInterface. +// A group's client should implement this interface. +type StorageStatesGetter interface { + StorageStates() StorageStateInterface +} + +// StorageStateInterface has methods to work with StorageState resources. 
+type StorageStateInterface interface { + Create(ctx context.Context, storageState *v1alpha1.StorageState, opts v1.CreateOptions) (*v1alpha1.StorageState, error) + Update(ctx context.Context, storageState *v1alpha1.StorageState, opts v1.UpdateOptions) (*v1alpha1.StorageState, error) + UpdateStatus(ctx context.Context, storageState *v1alpha1.StorageState, opts v1.UpdateOptions) (*v1alpha1.StorageState, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.StorageState, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.StorageStateList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageState, err error) + StorageStateExpansion +} + +// storageStates implements StorageStateInterface +type storageStates struct { + client rest.Interface +} + +// newStorageStates returns a StorageStates +func newStorageStates(c *MigrationV1alpha1Client) *storageStates { + return &storageStates{ + client: c.RESTClient(), + } +} + +// Get takes name of the storageState, and returns the corresponding storageState object, and an error if there is any. +func (c *storageStates) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageState, err error) { + result = &v1alpha1.StorageState{} + err = c.client.Get(). + Resource("storagestates"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StorageStates that match those selectors. +func (c *storageStates) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageStateList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.StorageStateList{} + err = c.client.Get(). + Resource("storagestates"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested storageStates. +func (c *storageStates) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("storagestates"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a storageState and creates it. Returns the server's representation of the storageState, and an error, if there is any. +func (c *storageStates) Create(ctx context.Context, storageState *v1alpha1.StorageState, opts v1.CreateOptions) (result *v1alpha1.StorageState, err error) { + result = &v1alpha1.StorageState{} + err = c.client.Post(). + Resource("storagestates"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(storageState). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a storageState and updates it. Returns the server's representation of the storageState, and an error, if there is any. 
+func (c *storageStates) Update(ctx context.Context, storageState *v1alpha1.StorageState, opts v1.UpdateOptions) (result *v1alpha1.StorageState, err error) { + result = &v1alpha1.StorageState{} + err = c.client.Put(). + Resource("storagestates"). + Name(storageState.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(storageState). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *storageStates) UpdateStatus(ctx context.Context, storageState *v1alpha1.StorageState, opts v1.UpdateOptions) (result *v1alpha1.StorageState, err error) { + result = &v1alpha1.StorageState{} + err = c.client.Put(). + Resource("storagestates"). + Name(storageState.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(storageState). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the storageState and deletes it. Returns an error if one occurs. +func (c *storageStates) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("storagestates"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *storageStates) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("storagestates"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched storageState. +func (c *storageStates) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageState, err error) { + result = &v1alpha1.StorageState{} + err = c.client.Patch(pt). + Resource("storagestates"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} diff --git a/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/storageversionmigration.go b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/storageversionmigration.go new file mode 100644 index 000000000..34fa3a987 --- /dev/null +++ b/vendor/sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/typed/migration/v1alpha1/storageversionmigration.go @@ -0,0 +1,184 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + "context" + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1alpha1 "sigs.k8s.io/kube-storage-version-migrator/pkg/apis/migration/v1alpha1" + scheme "sigs.k8s.io/kube-storage-version-migrator/pkg/clients/clientset/scheme" +) + +// StorageVersionMigrationsGetter has a method to return a StorageVersionMigrationInterface. +// A group's client should implement this interface. +type StorageVersionMigrationsGetter interface { + StorageVersionMigrations() StorageVersionMigrationInterface +} + +// StorageVersionMigrationInterface has methods to work with StorageVersionMigration resources. +type StorageVersionMigrationInterface interface { + Create(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.CreateOptions) (*v1alpha1.StorageVersionMigration, error) + Update(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*v1alpha1.StorageVersionMigration, error) + UpdateStatus(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (*v1alpha1.StorageVersionMigration, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.StorageVersionMigration, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.StorageVersionMigrationList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersionMigration, err error) + StorageVersionMigrationExpansion +} + +// storageVersionMigrations implements StorageVersionMigrationInterface +type storageVersionMigrations struct { + client rest.Interface +} + +// newStorageVersionMigrations returns a StorageVersionMigrations +func newStorageVersionMigrations(c *MigrationV1alpha1Client) *storageVersionMigrations { + return &storageVersionMigrations{ + client: c.RESTClient(), + } +} + +// Get takes name of the storageVersionMigration, and returns the corresponding storageVersionMigration object, and an error if there is any. +func (c *storageVersionMigrations) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.StorageVersionMigration, err error) { + result = &v1alpha1.StorageVersionMigration{} + err = c.client.Get(). + Resource("storageversionmigrations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StorageVersionMigrations that match those selectors. +func (c *storageVersionMigrations) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.StorageVersionMigrationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.StorageVersionMigrationList{} + err = c.client.Get(). + Resource("storageversionmigrations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested storageVersionMigrations. 
+func (c *storageVersionMigrations) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("storageversionmigrations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a storageVersionMigration and creates it. Returns the server's representation of the storageVersionMigration, and an error, if there is any. +func (c *storageVersionMigrations) Create(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.CreateOptions) (result *v1alpha1.StorageVersionMigration, err error) { + result = &v1alpha1.StorageVersionMigration{} + err = c.client.Post(). + Resource("storageversionmigrations"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(storageVersionMigration). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a storageVersionMigration and updates it. Returns the server's representation of the storageVersionMigration, and an error, if there is any. +func (c *storageVersionMigrations) Update(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (result *v1alpha1.StorageVersionMigration, err error) { + result = &v1alpha1.StorageVersionMigration{} + err = c.client.Put(). + Resource("storageversionmigrations"). + Name(storageVersionMigration.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(storageVersionMigration). + Do(ctx). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *storageVersionMigrations) UpdateStatus(ctx context.Context, storageVersionMigration *v1alpha1.StorageVersionMigration, opts v1.UpdateOptions) (result *v1alpha1.StorageVersionMigration, err error) { + result = &v1alpha1.StorageVersionMigration{} + err = c.client.Put(). + Resource("storageversionmigrations"). + Name(storageVersionMigration.Name). + SubResource("status"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(storageVersionMigration). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the storageVersionMigration and deletes it. Returns an error if one occurs. +func (c *storageVersionMigrations) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("storageversionmigrations"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *storageVersionMigrations) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("storageversionmigrations"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched storageVersionMigration. +func (c *storageVersionMigrations) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.StorageVersionMigration, err error) { + result = &v1alpha1.StorageVersionMigration{} + err = c.client.Patch(pt). 
+		Resource("storageversionmigrations").
+		Name(name).
+		SubResource(subresources...).
+		VersionedParams(&opts, scheme.ParameterCodec).
+		Body(data).
+		Do(ctx).
+		Into(result)
+	return
+}

From 98277ec89d0860da258cc68298f833fb79a19504 Mon Sep 17 00:00:00 2001
From: Salvatore Daniele
Date: Thu, 6 Jun 2024 09:18:54 -0400
Subject: [PATCH 3/3] Init klog flags

Updating the machine config operator removed an indirect dependency on
github.com/golang/glog. That package provides an init() function which
registers a number of flags that we expect to be present [1].

To stay compatible after the update, we explicitly register the
equivalent flags with klog.InitFlags() [2]. It is better to depend
explicitly on the packages required to keep the flag API consistent.

[1] https://github.com/golang/glog/blob/424d2337a5299a465c8a8228fc3ba4b1c28337a2/glog.go#L398-L404
[2] https://github.com/kubernetes/klog#:~:text=Use%20klog.InitFlags(nil)%20explicitly%20for%20initializing%20global%20flags%20as%20we%20no%20longer%20use%20init()%20method%20to%20register%20the%20flags

Signed-off-by: Salvatore Daniele
---
 cmd/webhook/main.go | 2 ++
 go.mod              | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/cmd/webhook/main.go b/cmd/webhook/main.go
index a83e08326..a43123cfc 100644
--- a/cmd/webhook/main.go
+++ b/cmd/webhook/main.go
@@ -5,6 +5,7 @@ import (
 	"os"
 
 	"github.com/spf13/cobra"
+	"k8s.io/klog/v2"
 	"sigs.k8s.io/controller-runtime/pkg/log"
 
 	snolog "github.com/k8snetworkplumbingwg/sriov-network-operator/pkg/log"
@@ -23,6 +24,7 @@ var (
 )
 
 func init() {
+	klog.InitFlags(nil)
 	snolog.BindFlags(flag.CommandLine)
 	rootCmd.PersistentFlags().AddGoFlagSet(flag.CommandLine)
 }
diff --git a/go.mod b/go.mod
index 572df7445..0543e886d 100644
--- a/go.mod
+++ b/go.mod
@@ -35,6 +35,7 @@ require (
 	k8s.io/apimachinery v0.28.3
 	k8s.io/client-go v0.28.3
 	k8s.io/code-generator v0.28.3
+	k8s.io/klog/v2 v2.100.1
 	k8s.io/kubectl v0.28.3
 	k8s.io/utils v0.0.0-20230726121419-3b25d923346b
 	sigs.k8s.io/controller-runtime v0.16.3
@@ -150,7 +151,6 @@ require (
 	k8s.io/cli-runtime v0.28.3 // indirect
 	k8s.io/component-base v0.28.3 // indirect
 	k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01 // indirect
-	k8s.io/klog/v2 v2.100.1 // indirect
 	k8s.io/kube-aggregator v0.27.4 // indirect
 	k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
 	k8s.io/kubelet v0.27.7 // indirect
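---

For reference, a minimal standalone sketch (not part of the patch series) of the
behavior PATCH 3/3 relies on: klog.InitFlags(nil) registers the glog-style
logging flags on the global flag set, replacing the registration that
github.com/golang/glog previously performed in its init(). The flag names
checked below ("v", "logtostderr", "log_dir") are taken from klog's documented
defaults; this is an illustration, not code from the repository.

package main

import (
	"flag"
	"fmt"

	"k8s.io/klog/v2"
)

func main() {
	// Explicitly register klog's flags (-v, -logtostderr, -log_dir, ...)
	// on the default flag set. This replaces what glog's init() used to
	// do implicitly while it was still an indirect dependency.
	klog.InitFlags(nil)
	flag.Parse()

	// Sanity check: the flags we expect to be present are registered.
	for _, name := range []string{"v", "logtostderr", "log_dir"} {
		if f := flag.Lookup(name); f != nil {
			fmt.Printf("-%s registered (default %q)\n", f.Name, f.DefValue)
		}
	}

	klog.V(2).Info("visible only when run with -v=2 or higher")
	klog.Flush()
}

In the webhook, calling klog.InitFlags(nil) in init() before the flag set is
handed to cobra via AddGoFlagSet(flag.CommandLine) keeps the externally visible
flags unchanged for existing callers.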