From 1f7aa1c0361f32b897f55541bf4f3a66ade53f1b Mon Sep 17 00:00:00 2001
From: Simon Ferquel
Date: Tue, 24 Apr 2018 12:28:08 +0200
Subject: [PATCH] Add filter support for stack ps and services with Kubernetes

Signed-off-by: Mathieu Champlon
---
 cli/command/stack/kubernetes/client.go | 5 +
 cli/command/stack/kubernetes/conversion.go | 133 +-
 .../stack/kubernetes/conversion_test.go | 4 +-
 cli/command/stack/kubernetes/ps.go | 83 +-
 cli/command/stack/kubernetes/services.go | 103 +-
 cli/command/stack/kubernetes/services_test.go | 115 +
 cli/command/stack/ps.go | 1 -
 cli/command/stack/services.go | 1 -
 docs/reference/commandline/stack_services.md | 17 +-
 .../pkg/apimachinery/announced/announced.go | 99 -
 .../apimachinery/announced/group_factory.go | 255 -
 .../apimachinery/pkg/apimachinery/doc.go | 20 -
 .../pkg/apimachinery/registered/registered.go | 336 -
 .../apimachinery/pkg/apimachinery/types.go | 87 -
 .../pkg/api/annotation_key_constants.go | 94 -
 vendor/k8s.io/kubernetes/pkg/api/doc.go | 24 -
 .../kubernetes/pkg/api/field_constants.go | 38 -
 vendor/k8s.io/kubernetes/pkg/api/json.go | 28 -
 .../kubernetes/pkg/api/objectreference.go | 34 -
 vendor/k8s.io/kubernetes/pkg/api/register.go | 124 -
 vendor/k8s.io/kubernetes/pkg/api/resource.go | 62 -
 vendor/k8s.io/kubernetes/pkg/api/taint.go | 36 -
 .../k8s.io/kubernetes/pkg/api/toleration.go | 30 -
 vendor/k8s.io/kubernetes/pkg/api/types.go | 4287 -----------
 .../pkg/api/zz_generated.deepcopy.go | 6320 -----------------
 25 files changed, 364 insertions(+), 11972 deletions(-)
 create mode 100644 cli/command/stack/kubernetes/services_test.go
 delete mode 100644 vendor/k8s.io/apimachinery/pkg/apimachinery/announced/announced.go
 delete mode 100644 vendor/k8s.io/apimachinery/pkg/apimachinery/announced/group_factory.go
 delete mode 100644 vendor/k8s.io/apimachinery/pkg/apimachinery/doc.go
 delete mode 100644 vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go
 delete mode 100644 vendor/k8s.io/apimachinery/pkg/apimachinery/types.go
 delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/annotation_key_constants.go
 delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/doc.go
 delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/field_constants.go
 delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/json.go
 delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/objectreference.go
 delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/register.go
 delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/resource.go
 delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/taint.go
 delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/toleration.go
 delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/types.go
 delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go

diff --git a/cli/command/stack/kubernetes/client.go b/cli/command/stack/kubernetes/client.go
index 76361e649293..5024c92341a2 100644
--- a/cli/command/stack/kubernetes/client.go
+++ b/cli/command/stack/kubernetes/client.go
@@ -76,6 +76,11 @@ func (s *Factory) ReplicaSets() typesappsv1beta2.ReplicaSetInterface {
 	return s.appsClientSet.ReplicaSets(s.namespace)
 }
 
+// DaemonSets returns a client for kubernetes daemon sets
+func (s *Factory) DaemonSets() typesappsv1beta2.DaemonSetInterface {
+	return s.appsClientSet.DaemonSets(s.namespace)
+}
+
 // Stacks returns a client for Docker's Stack on Kubernetes
 func (s *Factory) Stacks(allNamespaces bool) (StackClient, error) {
 	version, err := kubernetes.GetStackAPIVersion(s.clientSet)
diff --git a/cli/command/stack/kubernetes/conversion.go
b/cli/command/stack/kubernetes/conversion.go index e5becda85dda..83986661ec6a 100644 --- a/cli/command/stack/kubernetes/conversion.go +++ b/cli/command/stack/kubernetes/conversion.go @@ -2,10 +2,13 @@ package kubernetes import ( "fmt" + "sort" + "strings" "time" "github.com/docker/cli/cli/command/formatter" "github.com/docker/cli/kubernetes/labels" + "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" appsv1beta2 "k8s.io/api/apps/v1beta2" apiv1 "k8s.io/api/core/v1" @@ -65,13 +68,43 @@ func toSwarmProtocol(protocol apiv1.Protocol) swarm.PortConfigProtocol { return swarm.PortConfigProtocol("unknown") } -func fetchPods(namespace string, pods corev1.PodInterface) ([]apiv1.Pod, error) { - labelSelector := labels.SelectorForStack(namespace) - podsList, err := pods.List(metav1.ListOptions{LabelSelector: labelSelector}) +func fetchPods(stackName string, pods corev1.PodInterface, f filters.Args) ([]apiv1.Pod, error) { + services := f.Get("service") + // for existing script compatibility, support either or _ format + stackNamePrefix := stackName + "_" + for _, s := range services { + if strings.HasPrefix(s, stackNamePrefix) { + services = append(services, strings.TrimPrefix(s, stackNamePrefix)) + } + } + listOpts := metav1.ListOptions{LabelSelector: labels.SelectorForStack(stackName, services...)} + var result []apiv1.Pod + podsList, err := pods.List(listOpts) if err != nil { return nil, err } - return podsList.Items, nil + nodes := f.Get("node") + for _, pod := range podsList.Items { + if filterPod(pod, nodes) && + // name filter is done client side for matching partials + f.FuzzyMatch("name", stackNamePrefix+pod.Name) { + + result = append(result, pod) + } + } + return result, nil +} + +func filterPod(pod apiv1.Pod, nodes []string) bool { + if len(nodes) == 0 { + return true + } + for _, name := range nodes { + if pod.Spec.NodeName == name { + return true + } + } + return false } func getContainerImage(containers []apiv1.Container) string { @@ -121,56 +154,76 @@ const ( publishedOnRandomPortSuffix = "-random-ports" ) -// Replicas conversion -func replicasToServices(replicas *appsv1beta2.ReplicaSetList, services *apiv1.ServiceList) ([]swarm.Service, map[string]formatter.ServiceListInfo, error) { +func convertToServices(replicas *appsv1beta2.ReplicaSetList, daemons *appsv1beta2.DaemonSetList, services *apiv1.ServiceList) ([]swarm.Service, map[string]formatter.ServiceListInfo, error) { result := make([]swarm.Service, len(replicas.Items)) - infos := make(map[string]formatter.ServiceListInfo, len(replicas.Items)) + infos := make(map[string]formatter.ServiceListInfo, len(replicas.Items)+len(daemons.Items)) for i, r := range replicas.Items { - serviceName := r.Labels[labels.ForServiceName] - serviceHeadless, ok := findService(services, serviceName) - if !ok { - return nil, nil, fmt.Errorf("could not find service '%s'", serviceName) - } - stack, ok := serviceHeadless.Labels[labels.ForStackName] - if ok { - stack += "_" - } - uid := string(serviceHeadless.UID) - s := swarm.Service{ - ID: uid, - Spec: swarm.ServiceSpec{ - Annotations: swarm.Annotations{ - Name: stack + serviceHeadless.Name, - }, - TaskTemplate: swarm.TaskSpec{ - ContainerSpec: &swarm.ContainerSpec{ - Image: getContainerImage(r.Spec.Template.Spec.Containers), - }, - }, - }, - } - if serviceNodePort, ok := findService(services, serviceName+publishedOnRandomPortSuffix); ok && serviceNodePort.Spec.Type == apiv1.ServiceTypeNodePort { - s.Endpoint = serviceEndpoint(serviceNodePort, 
swarm.PortConfigPublishModeHost) - } - if serviceLoadBalancer, ok := findService(services, serviceName+publishedServiceSuffix); ok && serviceLoadBalancer.Spec.Type == apiv1.ServiceTypeLoadBalancer { - s.Endpoint = serviceEndpoint(serviceLoadBalancer, swarm.PortConfigPublishModeIngress) + s, err := convertToService(r.Labels[labels.ForServiceName], services, r.Spec.Template.Spec.Containers) + if err != nil { + return nil, nil, err } - result[i] = s - infos[uid] = formatter.ServiceListInfo{ + result[i] = *s + infos[s.ID] = formatter.ServiceListInfo{ Mode: "replicated", Replicas: fmt.Sprintf("%d/%d", r.Status.AvailableReplicas, r.Status.Replicas), } } + for _, d := range daemons.Items { + s, err := convertToService(d.Labels[labels.ForServiceName], services, d.Spec.Template.Spec.Containers) + if err != nil { + return nil, nil, err + } + result = append(result, *s) + infos[s.ID] = formatter.ServiceListInfo{ + Mode: "global", + Replicas: fmt.Sprintf("%d/%d", d.Status.NumberReady, d.Status.DesiredNumberScheduled), + } + } + sort.Slice(result, func(i, j int) bool { + return result[i].ID < result[j].ID + }) return result, infos, nil } -func findService(services *apiv1.ServiceList, name string) (apiv1.Service, bool) { +func convertToService(serviceName string, services *apiv1.ServiceList, containers []apiv1.Container) (*swarm.Service, error) { + serviceHeadless, err := findService(services, serviceName) + if err != nil { + return nil, err + } + stack, ok := serviceHeadless.Labels[labels.ForStackName] + if ok { + stack += "_" + } + uid := string(serviceHeadless.UID) + s := &swarm.Service{ + ID: uid, + Spec: swarm.ServiceSpec{ + Annotations: swarm.Annotations{ + Name: stack + serviceHeadless.Name, + }, + TaskTemplate: swarm.TaskSpec{ + ContainerSpec: &swarm.ContainerSpec{ + Image: getContainerImage(containers), + }, + }, + }, + } + if serviceNodePort, err := findService(services, serviceName+publishedOnRandomPortSuffix); err == nil && serviceNodePort.Spec.Type == apiv1.ServiceTypeNodePort { + s.Endpoint = serviceEndpoint(serviceNodePort, swarm.PortConfigPublishModeHost) + } + if serviceLoadBalancer, err := findService(services, serviceName+publishedServiceSuffix); err == nil && serviceLoadBalancer.Spec.Type == apiv1.ServiceTypeLoadBalancer { + s.Endpoint = serviceEndpoint(serviceLoadBalancer, swarm.PortConfigPublishModeIngress) + } + return s, nil +} + +func findService(services *apiv1.ServiceList, name string) (apiv1.Service, error) { for _, s := range services.Items { if s.Name == name { - return s, true + return s, nil } } - return apiv1.Service{}, false + return apiv1.Service{}, fmt.Errorf("could not find service '%s'", name) } func serviceEndpoint(service apiv1.Service, publishMode swarm.PortConfigPublishMode) swarm.Endpoint { diff --git a/cli/command/stack/kubernetes/conversion_test.go b/cli/command/stack/kubernetes/conversion_test.go index abc4163e52a1..866557b3a7dd 100644 --- a/cli/command/stack/kubernetes/conversion_test.go +++ b/cli/command/stack/kubernetes/conversion_test.go @@ -19,7 +19,7 @@ func TestReplicasConversionNeedsAService(t *testing.T) { Items: []appsv1beta2.ReplicaSet{makeReplicaSet("unknown", 0, 0)}, } services := apiv1.ServiceList{} - _, _, err := replicasToServices(&replicas, &services) + _, _, err := convertToServices(&replicas, &appsv1beta2.DaemonSetList{}, &services) assert.ErrorContains(t, err, "could not find service") } @@ -124,7 +124,7 @@ func TestKubernetesServiceToSwarmServiceConversion(t *testing.T) { } for _, tc := range testCases { - swarmServices, listInfo, err := 
replicasToServices(tc.replicas, tc.services) + swarmServices, listInfo, err := convertToServices(tc.replicas, &appsv1beta2.DaemonSetList{}, tc.services) assert.NilError(t, err) assert.DeepEqual(t, tc.expectedServices, swarmServices) assert.DeepEqual(t, tc.expectedListInfo, listInfo) diff --git a/cli/command/stack/kubernetes/ps.go b/cli/command/stack/kubernetes/ps.go index ded56db19d5c..1895ac7cd793 100644 --- a/cli/command/stack/kubernetes/ps.go +++ b/cli/command/stack/kubernetes/ps.go @@ -10,17 +10,23 @@ import ( "github.com/docker/cli/cli/command/task" "github.com/docker/docker/api/types/swarm" apiv1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/kubernetes/pkg/api" ) +var supportedPSFilters = map[string]bool{ + "name": true, + "service": true, + "node": true, +} + // RunPS is the kubernetes implementation of docker stack ps func RunPS(dockerCli *KubeCli, options options.PS) error { - namespace := options.Namespace - - // Initialize clients + filters := options.Filter.Value() + if err := filters.Validate(supportedPSFilters); err != nil { + return err + } client, err := dockerCli.composeClient() if err != nil { return err @@ -29,78 +35,71 @@ func RunPS(dockerCli *KubeCli, options options.PS) error { if err != nil { return err } - podsClient := client.Pods() - - // Fetch pods - if _, err := stacks.Get(namespace); err != nil { - return fmt.Errorf("nothing found in stack: %s", namespace) + stackName := options.Namespace + _, err = stacks.Get(stackName) + if apierrs.IsNotFound(err) { + return fmt.Errorf("nothing found in stack: %s", stackName) } - - pods, err := fetchPods(namespace, podsClient) if err != nil { return err } - + pods, err := fetchPods(stackName, client.Pods(), filters) + if err != nil { + return err + } if len(pods) == 0 { - return fmt.Errorf("nothing found in stack: %s", namespace) + return fmt.Errorf("nothing found in stack: %s", stackName) } + return printTasks(dockerCli, options, stackName, client, pods) +} +func printTasks(dockerCli command.Cli, options options.PS, namespace string, client corev1.NodesGetter, pods []apiv1.Pod) error { format := options.Format - if len(format) == 0 { + if format == "" { format = task.DefaultFormat(dockerCli.ConfigFile(), options.Quiet) } - nodeResolver := makeNodeResolver(options.NoResolve, client.Nodes()) tasks := make([]swarm.Task, len(pods)) for i, pod := range pods { tasks[i] = podToTask(pod) } - return print(dockerCli, namespace, tasks, pods, nodeResolver, !options.NoTrunc, options.Quiet, format) -} - -type idResolver func(name string) (string, error) - -func print(dockerCli command.Cli, namespace string, tasks []swarm.Task, pods []apiv1.Pod, nodeResolver idResolver, trunc, quiet bool, format string) error { sort.Stable(tasksBySlot(tasks)) names := map[string]string{} nodes := map[string]string{} - tasksCtx := formatter.Context{ - Output: dockerCli.Out(), - Format: formatter.NewTaskFormat(format, quiet), - Trunc: trunc, + n, err := client.Nodes().List(metav1.ListOptions{}) + if err != nil { + return err } - for i, task := range tasks { - nodeValue, err := nodeResolver(pods[i].Spec.NodeName) + nodeValue, err := resolveNode(pods[i].Spec.NodeName, n, options.NoResolve) if err != nil { return err } - names[task.ID] = fmt.Sprintf("%s_%s", namespace, pods[i].Name) nodes[task.ID] = nodeValue } + tasksCtx := formatter.Context{ + Output: dockerCli.Out(), + Format: 
formatter.NewTaskFormat(format, options.Quiet), + Trunc: !options.NoTrunc, + } + return formatter.TaskWrite(tasksCtx, tasks, names, nodes) } -func makeNodeResolver(noResolve bool, nodes corev1.NodeInterface) func(string) (string, error) { +func resolveNode(name string, nodes *apiv1.NodeList, noResolve bool) (string, error) { // Here we have a name and we need to resolve its identifier. To mimic swarm behavior - // we need to resolve the id when noresolve is set, otherwise we return the name. + // we need to resolve to the id when noResolve is set, otherwise we return the name. if noResolve { - return func(name string) (string, error) { - n, err := nodes.List(metav1.ListOptions{ - FieldSelector: fields.OneTermEqualSelector(api.ObjectNameField, name).String(), - }) - if err != nil { - return "", err - } - if len(n.Items) != 1 { - return "", fmt.Errorf("could not find node '%s'", name) + for _, node := range nodes.Items { + if node.Name == name { + return string(node.UID), nil } - return string(n.Items[0].UID), nil } + return "", fmt.Errorf("could not find node '%s'", name) } - return func(name string) (string, error) { return name, nil } + return name, nil } diff --git a/cli/command/stack/kubernetes/services.go b/cli/command/stack/kubernetes/services.go index b1a4a5f42f9d..9cf89f4f0fa8 100644 --- a/cli/command/stack/kubernetes/services.go +++ b/cli/command/stack/kubernetes/services.go @@ -2,43 +2,124 @@ package kubernetes import ( "fmt" + "sort" + "strings" "github.com/docker/cli/cli/command/formatter" "github.com/docker/cli/cli/command/stack/options" "github.com/docker/cli/kubernetes/labels" + "github.com/docker/docker/api/types/filters" + appsv1beta2 "k8s.io/api/apps/v1beta2" + corev1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +var supportedServicesFilters = map[string]bool{ + "mode": true, + "name": true, + "label": true, +} + +func generateSelector(labels map[string][]string) []string { + var result []string + for k, v := range labels { + switch len(v) { + case 0: + result = append(result, k) + case 1: + result = append(result, fmt.Sprintf("%s=%s", k, v[0])) + default: + sort.Strings(v) + result = append(result, fmt.Sprintf("%s in (%s)", k, strings.Join(v, ","))) + } + } + return result +} + +func parseLabelFilters(rawFilters []string) map[string][]string { + labels := map[string][]string{} + for _, rawLabel := range rawFilters { + v := strings.SplitN(rawLabel, "=", 2) + key := v[0] + if len(v) > 1 { + labels[key] = append(labels[key], v[1]) + } else if _, ok := labels[key]; !ok { + labels[key] = []string{} + } + } + return labels +} + +func generateLabelSelector(f filters.Args, stackName string) string { + names := f.Get("name") + sort.Strings(names) + for _, n := range names { + if strings.HasPrefix(n, stackName+"_") { + // also accepts with unprefixed service name (for compat with existing swarm scripts where service names are prefixed by stack names) + names = append(names, strings.TrimPrefix(n, stackName+"_")) + } + } + selectors := append(generateSelector(parseLabelFilters(f.Get("label"))), labels.SelectorForStack(stackName, names...)) + return strings.Join(selectors, ",") +} + +func getResourcesForServiceList(dockerCli *KubeCli, filters filters.Args, labelSelector string) (*appsv1beta2.ReplicaSetList, *appsv1beta2.DaemonSetList, *corev1.ServiceList, error) { + client, err := dockerCli.composeClient() + if err != nil { + return nil, nil, nil, err + } + modes := filters.Get("mode") + replicas := 
&appsv1beta2.ReplicaSetList{} + if len(modes) == 0 || filters.ExactMatch("mode", "replicated") { + if replicas, err = client.ReplicaSets().List(metav1.ListOptions{LabelSelector: labelSelector}); err != nil { + return nil, nil, nil, err + } + } + daemons := &appsv1beta2.DaemonSetList{} + if len(modes) == 0 || filters.ExactMatch("mode", "global") { + if daemons, err = client.DaemonSets().List(metav1.ListOptions{LabelSelector: labelSelector}); err != nil { + return nil, nil, nil, err + } + } + services, err := client.Services().List(metav1.ListOptions{LabelSelector: labelSelector}) + if err != nil { + return nil, nil, nil, err + } + return replicas, daemons, services, nil +} + // RunServices is the kubernetes implementation of docker stack services func RunServices(dockerCli *KubeCli, opts options.Services) error { - // Initialize clients + filters := opts.Filter.Value() + if err := filters.Validate(supportedServicesFilters); err != nil { + return err + } client, err := dockerCli.composeClient() if err != nil { return nil } stacks, err := client.Stacks(false) if err != nil { - return err - } - replicas := client.ReplicaSets() - - if _, err := stacks.Get(opts.Namespace); err != nil { - fmt.Fprintf(dockerCli.Err(), "Nothing found in stack: %s\n", opts.Namespace) return nil } - - replicasList, err := replicas.List(metav1.ListOptions{LabelSelector: labels.SelectorForStack(opts.Namespace)}) + stackName := opts.Namespace + _, err = stacks.Get(stackName) + if apierrs.IsNotFound(err) { + return fmt.Errorf("nothing found in stack: %s", stackName) + } if err != nil { return err } - servicesList, err := client.Services().List(metav1.ListOptions{LabelSelector: labels.SelectorForStack(opts.Namespace)}) + labelSelector := generateLabelSelector(filters, stackName) + replicasList, daemonsList, servicesList, err := getResourcesForServiceList(dockerCli, filters, labelSelector) if err != nil { return err } // Convert Replicas sets and kubernetes services to swam services and formatter informations - services, info, err := replicasToServices(replicasList, servicesList) + services, info, err := convertToServices(replicasList, daemonsList, servicesList) if err != nil { return err } diff --git a/cli/command/stack/kubernetes/services_test.go b/cli/command/stack/kubernetes/services_test.go new file mode 100644 index 000000000000..9537f203f98e --- /dev/null +++ b/cli/command/stack/kubernetes/services_test.go @@ -0,0 +1,115 @@ +package kubernetes + +import ( + "testing" + + "github.com/docker/docker/api/types/filters" + "github.com/gotestyourself/gotestyourself/assert" + "github.com/gotestyourself/gotestyourself/assert/cmp" +) + +func TestServiceFiltersLabelSelectorGen(t *testing.T) { + cases := []struct { + name string + stackName string + filters filters.Args + expectedSelectorParts []string + }{ + { + name: "no-filter", + stackName: "test", + filters: filters.NewArgs(), + expectedSelectorParts: []string{ + "com.docker.stack.namespace=test", + }, + }, + { + name: "single-name filter", + stackName: "test", + filters: filters.NewArgs(filters.KeyValuePair{Key: "name", Value: "svc-test"}), + expectedSelectorParts: []string{ + "com.docker.stack.namespace=test", + "com.docker.service.name=svc-test", + }, + }, + { + name: "multi-name filter", + stackName: "test", + filters: filters.NewArgs( + filters.KeyValuePair{Key: "name", Value: "svc-test"}, + filters.KeyValuePair{Key: "name", Value: "svc-test2"}, + ), + expectedSelectorParts: []string{ + "com.docker.stack.namespace=test", + "com.docker.service.name in 
(svc-test,svc-test2)", + }, + }, + { + name: "label present filter", + stackName: "test", + filters: filters.NewArgs( + filters.KeyValuePair{Key: "label", Value: "label-is-present"}, + ), + expectedSelectorParts: []string{ + "com.docker.stack.namespace=test", + "label-is-present", + }, + }, + { + name: "single value label filter", + stackName: "test", + filters: filters.NewArgs( + filters.KeyValuePair{Key: "label", Value: "label1=test"}, + ), + expectedSelectorParts: []string{ + "com.docker.stack.namespace=test", + "label1=test", + }, + }, + { + name: "multi value label filter", + stackName: "test", + filters: filters.NewArgs( + filters.KeyValuePair{Key: "label", Value: "label1=test"}, + filters.KeyValuePair{Key: "label", Value: "label1=test2"}, + ), + expectedSelectorParts: []string{ + "com.docker.stack.namespace=test", + "label1 in (test,test2)", + }, + }, + { + name: "2 different labels filter", + stackName: "test", + filters: filters.NewArgs( + filters.KeyValuePair{Key: "label", Value: "label1=test"}, + filters.KeyValuePair{Key: "label", Value: "label2=test2"}, + ), + expectedSelectorParts: []string{ + "com.docker.stack.namespace=test", + "label1=test", + "label2=test2", + }, + }, + { + name: "name filter with stackName prefix", + stackName: "test", + filters: filters.NewArgs( + filters.KeyValuePair{Key: "name", Value: "test_svc1"}, + ), + expectedSelectorParts: []string{ + "com.docker.stack.namespace=test", + "com.docker.service.name in (test_svc1,svc1)", + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + result := generateLabelSelector(c.filters, c.stackName) + for _, toFind := range c.expectedSelectorParts { + assert.Assert(t, cmp.Contains(result, toFind)) + } + }) + } +} diff --git a/cli/command/stack/ps.go b/cli/command/stack/ps.go index be446c1f31ca..bb1bed2d1b9f 100644 --- a/cli/command/stack/ps.go +++ b/cli/command/stack/ps.go @@ -37,7 +37,6 @@ func newPsCommand(dockerCli command.Cli) *cobra.Command { flags.BoolVar(&opts.NoTrunc, "no-trunc", false, "Do not truncate output") flags.BoolVar(&opts.NoResolve, "no-resolve", false, "Do not map IDs to Names") flags.VarP(&opts.Filter, "filter", "f", "Filter output based on conditions provided") - flags.SetAnnotation("filter", "swarm", nil) flags.BoolVarP(&opts.Quiet, "quiet", "q", false, "Only display task IDs") flags.StringVar(&opts.Format, "format", "", "Pretty-print tasks using a Go template") kubernetes.AddNamespaceFlag(flags) diff --git a/cli/command/stack/services.go b/cli/command/stack/services.go index 09e9310e8f7e..e1fed1a0de34 100644 --- a/cli/command/stack/services.go +++ b/cli/command/stack/services.go @@ -37,7 +37,6 @@ func newServicesCommand(dockerCli command.Cli) *cobra.Command { flags.BoolVarP(&opts.Quiet, "quiet", "q", false, "Only display IDs") flags.StringVar(&opts.Format, "format", "", "Pretty-print services using a Go template") flags.VarP(&opts.Filter, "filter", "f", "Filter output based on conditions provided") - flags.SetAnnotation("filter", "swarm", nil) kubernetes.AddNamespaceFlag(flags) return cmd } diff --git a/docs/reference/commandline/stack_services.md b/docs/reference/commandline/stack_services.md index a80e0f0ad0ff..5cb3c110b661 100644 --- a/docs/reference/commandline/stack_services.md +++ b/docs/reference/commandline/stack_services.md @@ -64,8 +64,23 @@ dn7m7nhhfb9y myapp_db 1/1 mysql@sha256:a9a5b559f8821fe73d58c3606c8 The currently supported filters are: * id / ID (`--filter id=7be5ei6sqeye`, or `--filter ID=7be5ei6sqeye`) -* name (`--filter name=myapp_web`) + * Swarm: 
supported + * Kubernetes: not supported * label (`--filter label=key=value`) + * Swarm: supported + * Kubernetes: supported +* mode (`--filter mode=replicated`, or `--filter mode=global`) + * Swarm: not supported + * Kubernetes: supported +* name (`--filter name=myapp_web`) + * Swarm: supported + * Kubernetes: supported +* node (`--filter node=mynode`) + * Swarm: not supported + * Kubernetes: supported +* service (`--filter service=web`) + * Swarm: not supported + * Kubernetes: supported ### Formatting diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/announced.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/announced.go deleted file mode 100644 index 2c8568c1f768..000000000000 --- a/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/announced.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package announced contains tools for announcing API group factories. This is -// distinct from registration (in the 'registered' package) in that it's safe -// to announce every possible group linked in, but only groups requested at -// runtime should be registered. This package contains both a registry, and -// factory code (which was formerly copy-pasta in every install package). -package announced - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/apimachinery/registered" - "k8s.io/apimachinery/pkg/runtime" -) - -// APIGroupFactoryRegistry allows for groups and versions to announce themselves, -// which simply makes them available and doesn't take other actions. Later, -// users of the registry can select which groups and versions they'd actually -// like to register with an APIRegistrationManager. -// -// (Right now APIRegistrationManager has separate 'registration' and 'enabled' -// concepts-- APIGroupFactory is going to take over the former function; -// they will overlap untill the refactoring is finished.) -// -// The key is the group name. After initialization, this should be treated as -// read-only. It is implemented as a map from group name to group factory, and -// it is safe to use this knowledge to manually pick out groups to register -// (e.g., for testing). -type APIGroupFactoryRegistry map[string]*GroupMetaFactory - -func (gar APIGroupFactoryRegistry) group(groupName string) *GroupMetaFactory { - gmf, ok := gar[groupName] - if !ok { - gmf = &GroupMetaFactory{VersionArgs: map[string]*GroupVersionFactoryArgs{}} - gar[groupName] = gmf - } - return gmf -} - -// AnnounceGroupVersion adds the particular arguments for this group version to the group factory. -func (gar APIGroupFactoryRegistry) AnnounceGroupVersion(gvf *GroupVersionFactoryArgs) error { - gmf := gar.group(gvf.GroupName) - if _, ok := gmf.VersionArgs[gvf.VersionName]; ok { - return fmt.Errorf("version %q in group %q has already been announced", gvf.VersionName, gvf.GroupName) - } - gmf.VersionArgs[gvf.VersionName] = gvf - return nil -} - -// AnnounceGroup adds the group-wide arguments to the group factory. 
-func (gar APIGroupFactoryRegistry) AnnounceGroup(args *GroupMetaFactoryArgs) error { - gmf := gar.group(args.GroupName) - if gmf.GroupArgs != nil { - return fmt.Errorf("group %q has already been announced", args.GroupName) - } - gmf.GroupArgs = args - return nil -} - -// RegisterAndEnableAll throws every factory at the specified API registration -// manager, and lets it decide which to register. (If you want to do this a la -// cart, you may look through gar itself-- it's just a map.) -func (gar APIGroupFactoryRegistry) RegisterAndEnableAll(m *registered.APIRegistrationManager, scheme *runtime.Scheme) error { - for groupName, gmf := range gar { - if err := gmf.Register(m); err != nil { - return fmt.Errorf("error registering %v: %v", groupName, err) - } - if err := gmf.Enable(m, scheme); err != nil { - return fmt.Errorf("error enabling %v: %v", groupName, err) - } - } - return nil -} - -// AnnouncePreconstructedFactory announces a factory which you've manually assembled. -// You may call this instead of calling AnnounceGroup and AnnounceGroupVersion. -func (gar APIGroupFactoryRegistry) AnnouncePreconstructedFactory(gmf *GroupMetaFactory) error { - name := gmf.GroupArgs.GroupName - if _, exists := gar[name]; exists { - return fmt.Errorf("the group %q has already been announced.", name) - } - gar[name] = gmf - return nil -} diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/group_factory.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/group_factory.go deleted file mode 100644 index 154ed08f5511..000000000000 --- a/vendor/k8s.io/apimachinery/pkg/apimachinery/announced/group_factory.go +++ /dev/null @@ -1,255 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package announced - -import ( - "fmt" - - "github.com/golang/glog" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/apimachinery" - "k8s.io/apimachinery/pkg/apimachinery/registered" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/sets" -) - -type SchemeFunc func(*runtime.Scheme) error -type VersionToSchemeFunc map[string]SchemeFunc - -// GroupVersionFactoryArgs contains all the per-version parts of a GroupMetaFactory. -type GroupVersionFactoryArgs struct { - GroupName string - VersionName string - - AddToScheme SchemeFunc -} - -// GroupMetaFactoryArgs contains the group-level args of a GroupMetaFactory. -type GroupMetaFactoryArgs struct { - // GroupName is the name of the API-Group - // - // example: 'servicecatalog.k8s.io' - GroupName string - VersionPreferenceOrder []string - // RootScopedKinds are resources that are not namespaced. - RootScopedKinds sets.String // nil is allowed - IgnoredKinds sets.String // nil is allowed - - // May be nil if there are no internal objects. - AddInternalObjectsToScheme SchemeFunc -} - -// NewGroupMetaFactory builds the args for you. This is for if you're -// constructing a factory all at once and not using the registry. 
-func NewGroupMetaFactory(groupArgs *GroupMetaFactoryArgs, versions VersionToSchemeFunc) *GroupMetaFactory { - gmf := &GroupMetaFactory{ - GroupArgs: groupArgs, - VersionArgs: map[string]*GroupVersionFactoryArgs{}, - } - for v, f := range versions { - gmf.VersionArgs[v] = &GroupVersionFactoryArgs{ - GroupName: groupArgs.GroupName, - VersionName: v, - AddToScheme: f, - } - } - return gmf -} - -// Announce adds this Group factory to the global factory registry. It should -// only be called if you constructed the GroupMetaFactory yourself via -// NewGroupMetaFactory. -// Note that this will panic on an error, since it's expected that you'll be -// calling this at initialization time and any error is a result of a -// programmer importing the wrong set of packages. If this assumption doesn't -// work for you, just call DefaultGroupFactoryRegistry.AnnouncePreconstructedFactory -// yourself. -func (gmf *GroupMetaFactory) Announce(groupFactoryRegistry APIGroupFactoryRegistry) *GroupMetaFactory { - if err := groupFactoryRegistry.AnnouncePreconstructedFactory(gmf); err != nil { - panic(err) - } - return gmf -} - -// GroupMetaFactory has the logic for actually assembling and registering a group. -// -// There are two ways of obtaining one of these. -// 1. You can announce your group and versions separately, and then let the -// GroupFactoryRegistry assemble this object for you. (This allows group and -// versions to be imported separately, without referencing each other, to -// keep import trees small.) -// 2. You can call NewGroupMetaFactory(), which is mostly a drop-in replacement -// for the old, bad way of doing things. You can then call .Announce() to -// announce your constructed factory to any code that would like to do -// things the new, better way. -// -// Note that GroupMetaFactory actually does construct GroupMeta objects, but -// currently it does so in a way that's very entangled with an -// APIRegistrationManager. It's a TODO item to cleanly separate that interface. -type GroupMetaFactory struct { - GroupArgs *GroupMetaFactoryArgs - // map of version name to version factory - VersionArgs map[string]*GroupVersionFactoryArgs - - // assembled by Register() - prioritizedVersionList []schema.GroupVersion -} - -// Register constructs the finalized prioritized version list and sanity checks -// the announced group & versions. Then it calls register. -func (gmf *GroupMetaFactory) Register(m *registered.APIRegistrationManager) error { - if gmf.GroupArgs == nil { - return fmt.Errorf("partially announced groups are not allowed, only got versions: %#v", gmf.VersionArgs) - } - if len(gmf.VersionArgs) == 0 { - return fmt.Errorf("group %v announced but no versions announced", gmf.GroupArgs.GroupName) - } - - pvSet := sets.NewString(gmf.GroupArgs.VersionPreferenceOrder...) - if pvSet.Len() != len(gmf.GroupArgs.VersionPreferenceOrder) { - return fmt.Errorf("preference order for group %v has duplicates: %v", gmf.GroupArgs.GroupName, gmf.GroupArgs.VersionPreferenceOrder) - } - prioritizedVersions := []schema.GroupVersion{} - for _, v := range gmf.GroupArgs.VersionPreferenceOrder { - prioritizedVersions = append( - prioritizedVersions, - schema.GroupVersion{ - Group: gmf.GroupArgs.GroupName, - Version: v, - }, - ) - } - - // Go through versions that weren't explicitly prioritized. 
- unprioritizedVersions := []schema.GroupVersion{} - for _, v := range gmf.VersionArgs { - if v.GroupName != gmf.GroupArgs.GroupName { - return fmt.Errorf("found %v/%v in group %v?", v.GroupName, v.VersionName, gmf.GroupArgs.GroupName) - } - if pvSet.Has(v.VersionName) { - pvSet.Delete(v.VersionName) - continue - } - unprioritizedVersions = append(unprioritizedVersions, schema.GroupVersion{Group: v.GroupName, Version: v.VersionName}) - } - if len(unprioritizedVersions) > 1 { - glog.Warningf("group %v has multiple unprioritized versions: %#v. They will have an arbitrary preference order!", gmf.GroupArgs.GroupName, unprioritizedVersions) - } - if pvSet.Len() != 0 { - return fmt.Errorf("group %v has versions in the priority list that were never announced: %s", gmf.GroupArgs.GroupName, pvSet) - } - prioritizedVersions = append(prioritizedVersions, unprioritizedVersions...) - m.RegisterVersions(prioritizedVersions) - gmf.prioritizedVersionList = prioritizedVersions - return nil -} - -func (gmf *GroupMetaFactory) newRESTMapper(scheme *runtime.Scheme, externalVersions []schema.GroupVersion, groupMeta *apimachinery.GroupMeta) meta.RESTMapper { - // the list of kinds that are scoped at the root of the api hierarchy - // if a kind is not enumerated here, it is assumed to have a namespace scope - rootScoped := sets.NewString() - if gmf.GroupArgs.RootScopedKinds != nil { - rootScoped = gmf.GroupArgs.RootScopedKinds - } - ignoredKinds := sets.NewString() - if gmf.GroupArgs.IgnoredKinds != nil { - ignoredKinds = gmf.GroupArgs.IgnoredKinds - } - - mapper := meta.NewDefaultRESTMapper(externalVersions, groupMeta.InterfacesFor) - for _, gv := range externalVersions { - for kind := range scheme.KnownTypes(gv) { - if ignoredKinds.Has(kind) { - continue - } - scope := meta.RESTScopeNamespace - if rootScoped.Has(kind) { - scope = meta.RESTScopeRoot - } - mapper.Add(gv.WithKind(kind), scope) - } - } - - return mapper -} - -// Enable enables group versions that are allowed, adds methods to the scheme, etc. 
-func (gmf *GroupMetaFactory) Enable(m *registered.APIRegistrationManager, scheme *runtime.Scheme) error { - externalVersions := []schema.GroupVersion{} - for _, v := range gmf.prioritizedVersionList { - if !m.IsAllowedVersion(v) { - continue - } - externalVersions = append(externalVersions, v) - if err := m.EnableVersions(v); err != nil { - return err - } - gmf.VersionArgs[v.Version].AddToScheme(scheme) - } - if len(externalVersions) == 0 { - glog.V(4).Infof("No version is registered for group %v", gmf.GroupArgs.GroupName) - return nil - } - - if gmf.GroupArgs.AddInternalObjectsToScheme != nil { - gmf.GroupArgs.AddInternalObjectsToScheme(scheme) - } - - preferredExternalVersion := externalVersions[0] - accessor := meta.NewAccessor() - - groupMeta := &apimachinery.GroupMeta{ - GroupVersion: preferredExternalVersion, - GroupVersions: externalVersions, - SelfLinker: runtime.SelfLinker(accessor), - } - for _, v := range externalVersions { - gvf := gmf.VersionArgs[v.Version] - if err := groupMeta.AddVersionInterfaces( - schema.GroupVersion{Group: gvf.GroupName, Version: gvf.VersionName}, - &meta.VersionInterfaces{ - ObjectConvertor: scheme, - MetadataAccessor: accessor, - }, - ); err != nil { - return err - } - } - groupMeta.InterfacesFor = groupMeta.DefaultInterfacesFor - groupMeta.RESTMapper = gmf.newRESTMapper(scheme, externalVersions, groupMeta) - - if err := m.RegisterGroup(*groupMeta); err != nil { - return err - } - return nil -} - -// RegisterAndEnable is provided only to allow this code to get added in multiple steps. -// It's really bad that this is called in init() methods, but supporting this -// temporarily lets us do the change incrementally. -func (gmf *GroupMetaFactory) RegisterAndEnable(registry *registered.APIRegistrationManager, scheme *runtime.Scheme) error { - if err := gmf.Register(registry); err != nil { - return err - } - if err := gmf.Enable(registry, scheme); err != nil { - return err - } - - return nil -} diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/doc.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/doc.go deleted file mode 100644 index b238454b261d..000000000000 --- a/vendor/k8s.io/apimachinery/pkg/apimachinery/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package apimachinery contains the generic API machinery code that -// is common to both server and clients. -// This package should never import specific API objects. -package apimachinery // import "k8s.io/apimachinery/pkg/apimachinery" diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go deleted file mode 100644 index 5150db016598..000000000000 --- a/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go +++ /dev/null @@ -1,336 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package to keep track of API Versions that can be registered and are enabled in api.Scheme. -package registered - -import ( - "fmt" - "sort" - "strings" - - "github.com/golang/glog" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/apimachinery" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/sets" -) - -// APIRegistrationManager provides the concept of what API groups are enabled. -// -// TODO: currently, it also provides a "registered" concept. But it's wrong to -// have both concepts in the same object. Therefore the "announced" package is -// going to take over the registered concept. After all the install packages -// are switched to using the announce package instead of this package, then we -// can combine the registered/enabled concepts in this object. Simplifying this -// isn't easy right now because there are so many callers of this package. -type APIRegistrationManager struct { - // registeredGroupVersions stores all API group versions for which RegisterGroup is called. - registeredVersions map[schema.GroupVersion]struct{} - - // enabledVersions represents all enabled API versions. It should be a - // subset of registeredVersions. Please call EnableVersions() to add - // enabled versions. - enabledVersions map[schema.GroupVersion]struct{} - - // map of group meta for all groups. - groupMetaMap map[string]*apimachinery.GroupMeta - - // envRequestedVersions represents the versions requested via the - // KUBE_API_VERSIONS environment variable. The install package of each group - // checks this list before add their versions to the latest package and - // Scheme. This list is small and order matters, so represent as a slice - envRequestedVersions []schema.GroupVersion -} - -// NewAPIRegistrationManager constructs a new manager. The argument ought to be -// the value of the KUBE_API_VERSIONS env var, or a value of this which you -// wish to test. -func NewAPIRegistrationManager(kubeAPIVersions string) (*APIRegistrationManager, error) { - m := &APIRegistrationManager{ - registeredVersions: map[schema.GroupVersion]struct{}{}, - enabledVersions: map[schema.GroupVersion]struct{}{}, - groupMetaMap: map[string]*apimachinery.GroupMeta{}, - envRequestedVersions: []schema.GroupVersion{}, - } - - if len(kubeAPIVersions) != 0 { - for _, version := range strings.Split(kubeAPIVersions, ",") { - gv, err := schema.ParseGroupVersion(version) - if err != nil { - return nil, fmt.Errorf("invalid api version: %s in KUBE_API_VERSIONS: %s.", - version, kubeAPIVersions) - } - m.envRequestedVersions = append(m.envRequestedVersions, gv) - } - } - return m, nil -} - -func NewOrDie(kubeAPIVersions string) *APIRegistrationManager { - m, err := NewAPIRegistrationManager(kubeAPIVersions) - if err != nil { - glog.Fatalf("Could not construct version manager: %v (KUBE_API_VERSIONS=%q)", err, kubeAPIVersions) - } - return m -} - -// RegisterVersions adds the given group versions to the list of registered group versions. 
-func (m *APIRegistrationManager) RegisterVersions(availableVersions []schema.GroupVersion) { - for _, v := range availableVersions { - m.registeredVersions[v] = struct{}{} - } -} - -// RegisterGroup adds the given group to the list of registered groups. -func (m *APIRegistrationManager) RegisterGroup(groupMeta apimachinery.GroupMeta) error { - groupName := groupMeta.GroupVersion.Group - if _, found := m.groupMetaMap[groupName]; found { - return fmt.Errorf("group %q is already registered in groupsMap: %v", groupName, m.groupMetaMap) - } - m.groupMetaMap[groupName] = &groupMeta - return nil -} - -// EnableVersions adds the versions for the given group to the list of enabled versions. -// Note that the caller should call RegisterGroup before calling this method. -// The caller of this function is responsible to add the versions to scheme and RESTMapper. -func (m *APIRegistrationManager) EnableVersions(versions ...schema.GroupVersion) error { - var unregisteredVersions []schema.GroupVersion - for _, v := range versions { - if _, found := m.registeredVersions[v]; !found { - unregisteredVersions = append(unregisteredVersions, v) - } - m.enabledVersions[v] = struct{}{} - } - if len(unregisteredVersions) != 0 { - return fmt.Errorf("Please register versions before enabling them: %v", unregisteredVersions) - } - return nil -} - -// IsAllowedVersion returns if the version is allowed by the KUBE_API_VERSIONS -// environment variable. If the environment variable is empty, then it always -// returns true. -func (m *APIRegistrationManager) IsAllowedVersion(v schema.GroupVersion) bool { - if len(m.envRequestedVersions) == 0 { - return true - } - for _, envGV := range m.envRequestedVersions { - if v == envGV { - return true - } - } - return false -} - -// IsEnabledVersion returns if a version is enabled. -func (m *APIRegistrationManager) IsEnabledVersion(v schema.GroupVersion) bool { - _, found := m.enabledVersions[v] - return found -} - -// EnabledVersions returns all enabled versions. Groups are randomly ordered, but versions within groups -// are priority order from best to worst -func (m *APIRegistrationManager) EnabledVersions() []schema.GroupVersion { - ret := []schema.GroupVersion{} - for _, groupMeta := range m.groupMetaMap { - for _, version := range groupMeta.GroupVersions { - if m.IsEnabledVersion(version) { - ret = append(ret, version) - } - } - } - return ret -} - -// EnabledVersionsForGroup returns all enabled versions for a group in order of best to worst -func (m *APIRegistrationManager) EnabledVersionsForGroup(group string) []schema.GroupVersion { - groupMeta, ok := m.groupMetaMap[group] - if !ok { - return []schema.GroupVersion{} - } - - ret := []schema.GroupVersion{} - for _, version := range groupMeta.GroupVersions { - if m.IsEnabledVersion(version) { - ret = append(ret, version) - } - } - return ret -} - -// Group returns the metadata of a group if the group is registered, otherwise -// an error is returned. -func (m *APIRegistrationManager) Group(group string) (*apimachinery.GroupMeta, error) { - groupMeta, found := m.groupMetaMap[group] - if !found { - return nil, fmt.Errorf("group %v has not been registered", group) - } - groupMetaCopy := *groupMeta - return &groupMetaCopy, nil -} - -// IsRegistered takes a string and determines if it's one of the registered groups -func (m *APIRegistrationManager) IsRegistered(group string) bool { - _, found := m.groupMetaMap[group] - return found -} - -// IsRegisteredVersion returns if a version is registered. 
-func (m *APIRegistrationManager) IsRegisteredVersion(v schema.GroupVersion) bool { - _, found := m.registeredVersions[v] - return found -} - -// RegisteredGroupVersions returns all registered group versions. -func (m *APIRegistrationManager) RegisteredGroupVersions() []schema.GroupVersion { - ret := []schema.GroupVersion{} - for groupVersion := range m.registeredVersions { - ret = append(ret, groupVersion) - } - return ret -} - -// InterfacesFor is a union meta.VersionInterfacesFunc func for all registered types -func (m *APIRegistrationManager) InterfacesFor(version schema.GroupVersion) (*meta.VersionInterfaces, error) { - groupMeta, err := m.Group(version.Group) - if err != nil { - return nil, err - } - return groupMeta.InterfacesFor(version) -} - -// TODO: This is an expedient function, because we don't check if a Group is -// supported throughout the code base. We will abandon this function and -// checking the error returned by the Group() function. -func (m *APIRegistrationManager) GroupOrDie(group string) *apimachinery.GroupMeta { - groupMeta, found := m.groupMetaMap[group] - if !found { - if group == "" { - panic("The legacy v1 API is not registered.") - } else { - panic(fmt.Sprintf("Group %s is not registered.", group)) - } - } - groupMetaCopy := *groupMeta - return &groupMetaCopy -} - -// RESTMapper returns a union RESTMapper of all known types with priorities chosen in the following order: -// 1. if KUBE_API_VERSIONS is specified, then KUBE_API_VERSIONS in order, OR -// 1. legacy kube group preferred version, extensions preferred version, metrics perferred version, legacy -// kube any version, extensions any version, metrics any version, all other groups alphabetical preferred version, -// all other groups alphabetical. -func (m *APIRegistrationManager) RESTMapper(versionPatterns ...schema.GroupVersion) meta.RESTMapper { - unionMapper := meta.MultiRESTMapper{} - unionedGroups := sets.NewString() - for enabledVersion := range m.enabledVersions { - if !unionedGroups.Has(enabledVersion.Group) { - unionedGroups.Insert(enabledVersion.Group) - groupMeta := m.groupMetaMap[enabledVersion.Group] - unionMapper = append(unionMapper, groupMeta.RESTMapper) - } - } - - if len(versionPatterns) != 0 { - resourcePriority := []schema.GroupVersionResource{} - kindPriority := []schema.GroupVersionKind{} - for _, versionPriority := range versionPatterns { - resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource)) - kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind)) - } - - return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} - } - - if len(m.envRequestedVersions) != 0 { - resourcePriority := []schema.GroupVersionResource{} - kindPriority := []schema.GroupVersionKind{} - - for _, versionPriority := range m.envRequestedVersions { - resourcePriority = append(resourcePriority, versionPriority.WithResource(meta.AnyResource)) - kindPriority = append(kindPriority, versionPriority.WithKind(meta.AnyKind)) - } - - return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} - } - - prioritizedGroups := []string{"", "extensions", "metrics"} - resourcePriority, kindPriority := m.prioritiesForGroups(prioritizedGroups...) - - prioritizedGroupsSet := sets.NewString(prioritizedGroups...) 
- remainingGroups := sets.String{} - for enabledVersion := range m.enabledVersions { - if !prioritizedGroupsSet.Has(enabledVersion.Group) { - remainingGroups.Insert(enabledVersion.Group) - } - } - - remainingResourcePriority, remainingKindPriority := m.prioritiesForGroups(remainingGroups.List()...) - resourcePriority = append(resourcePriority, remainingResourcePriority...) - kindPriority = append(kindPriority, remainingKindPriority...) - - return meta.PriorityRESTMapper{Delegate: unionMapper, ResourcePriority: resourcePriority, KindPriority: kindPriority} -} - -// prioritiesForGroups returns the resource and kind priorities for a PriorityRESTMapper, preferring the preferred version of each group first, -// then any non-preferred version of the group second. -func (m *APIRegistrationManager) prioritiesForGroups(groups ...string) ([]schema.GroupVersionResource, []schema.GroupVersionKind) { - resourcePriority := []schema.GroupVersionResource{} - kindPriority := []schema.GroupVersionKind{} - - for _, group := range groups { - availableVersions := m.EnabledVersionsForGroup(group) - if len(availableVersions) > 0 { - resourcePriority = append(resourcePriority, availableVersions[0].WithResource(meta.AnyResource)) - kindPriority = append(kindPriority, availableVersions[0].WithKind(meta.AnyKind)) - } - } - for _, group := range groups { - resourcePriority = append(resourcePriority, schema.GroupVersionResource{Group: group, Version: meta.AnyVersion, Resource: meta.AnyResource}) - kindPriority = append(kindPriority, schema.GroupVersionKind{Group: group, Version: meta.AnyVersion, Kind: meta.AnyKind}) - } - - return resourcePriority, kindPriority -} - -// AllPreferredGroupVersions returns the preferred versions of all registered -// groups in the form of "group1/version1,group2/version2,..." -func (m *APIRegistrationManager) AllPreferredGroupVersions() string { - if len(m.groupMetaMap) == 0 { - return "" - } - var defaults []string - for _, groupMeta := range m.groupMetaMap { - defaults = append(defaults, groupMeta.GroupVersion.String()) - } - sort.Strings(defaults) - return strings.Join(defaults, ",") -} - -// ValidateEnvRequestedVersions returns a list of versions that are requested in -// the KUBE_API_VERSIONS environment variable, but not enabled. -func (m *APIRegistrationManager) ValidateEnvRequestedVersions() []schema.GroupVersion { - var missingVersions []schema.GroupVersion - for _, v := range m.envRequestedVersions { - if _, found := m.enabledVersions[v]; !found { - missingVersions = append(missingVersions, v) - } - } - return missingVersions -} diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go deleted file mode 100644 index 213e34bc00e3..000000000000 --- a/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go +++ /dev/null @@ -1,87 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package apimachinery - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupMeta stores the metadata of a group. -type GroupMeta struct { - // GroupVersion represents the preferred version of the group. - GroupVersion schema.GroupVersion - - // GroupVersions is Group + all versions in that group. - GroupVersions []schema.GroupVersion - - // SelfLinker can set or get the SelfLink field of all API types. - // TODO: when versioning changes, make this part of each API definition. - // TODO(lavalamp): Combine SelfLinker & ResourceVersioner interfaces, force all uses - // to go through the InterfacesFor method below. - SelfLinker runtime.SelfLinker - - // RESTMapper provides the default mapping between REST paths and the objects declared in api.Scheme and all known - // versions. - RESTMapper meta.RESTMapper - - // InterfacesFor returns the default Codec and ResourceVersioner for a given version - // string, or an error if the version is not known. - // TODO: make this stop being a func pointer and always use the default - // function provided below once every place that populates this field has been changed. - InterfacesFor func(version schema.GroupVersion) (*meta.VersionInterfaces, error) - - // InterfacesByVersion stores the per-version interfaces. - InterfacesByVersion map[schema.GroupVersion]*meta.VersionInterfaces -} - -// DefaultInterfacesFor returns the default Codec and ResourceVersioner for a given version -// string, or an error if the version is not known. -// TODO: Remove the "Default" prefix. -func (gm *GroupMeta) DefaultInterfacesFor(version schema.GroupVersion) (*meta.VersionInterfaces, error) { - if v, ok := gm.InterfacesByVersion[version]; ok { - return v, nil - } - return nil, fmt.Errorf("unsupported storage version: %s (valid: %v)", version, gm.GroupVersions) -} - -// AddVersionInterfaces adds the given version to the group. Only call during -// init, after that GroupMeta objects should be immutable. Not thread safe. -// (If you use this, be sure to set .InterfacesFor = .DefaultInterfacesFor) -// TODO: remove the "Interfaces" suffix and make this also maintain the -// .GroupVersions member. -func (gm *GroupMeta) AddVersionInterfaces(version schema.GroupVersion, interfaces *meta.VersionInterfaces) error { - if e, a := gm.GroupVersion.Group, version.Group; a != e { - return fmt.Errorf("got a version in group %v, but am in group %v", a, e) - } - if gm.InterfacesByVersion == nil { - gm.InterfacesByVersion = make(map[schema.GroupVersion]*meta.VersionInterfaces) - } - gm.InterfacesByVersion[version] = interfaces - - // TODO: refactor to make the below error not possible, this function - // should *set* GroupVersions rather than depend on it. - for _, v := range gm.GroupVersions { - if v == version { - return nil - } - } - return fmt.Errorf("added a version interface without the corresponding version %v being in the list %#v", version, gm.GroupVersions) -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/annotation_key_constants.go b/vendor/k8s.io/kubernetes/pkg/api/annotation_key_constants.go deleted file mode 100644 index e935424462b5..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/annotation_key_constants.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file should be consistent with pkg/api/v1/annotation_key_constants.go. - -package api - -const ( - // ImagePolicyFailedOpenKey is added to pods created by failing open when the image policy - // webhook backend fails. - ImagePolicyFailedOpenKey string = "alpha.image-policy.k8s.io/failed-open" - - // PodPresetOptOutAnnotationKey represents the annotation key for a pod to exempt itself from pod preset manipulation - PodPresetOptOutAnnotationKey string = "podpreset.admission.kubernetes.io/exclude" - - // MirrorAnnotationKey represents the annotation key set by kubelets when creating mirror pods - MirrorPodAnnotationKey string = "kubernetes.io/config.mirror" - - // TolerationsAnnotationKey represents the key of tolerations data (json serialized) - // in the Annotations of a Pod. - TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations" - - // TaintsAnnotationKey represents the key of taints data (json serialized) - // in the Annotations of a Node. - TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints" - - // SeccompPodAnnotationKey represents the key of a seccomp profile applied - // to all containers of a pod. - SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod" - - // SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied - // to one container of a pod. - SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/" - - // CreatedByAnnotation represents the key used to store the spec(json) - // used to create the resource. - // This field is deprecated in favor of ControllerRef (see #44407). - // TODO(#50720): Remove this field in v1.9. - CreatedByAnnotation = "kubernetes.io/created-by" - - // PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized) - // in the Annotations of a Node. - PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" - - // SysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure - // container of a pod. The annotation value is a comma separated list of sysctl_name=value - // key-value pairs. Only a limited set of whitelisted and isolated sysctls is supported by - // the kubelet. Pods with other sysctls will fail to launch. - SysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/sysctls" - - // UnsafeSysctlsPodAnnotationKey represents the key of sysctls which are set for the infrastructure - // container of a pod. The annotation value is a comma separated list of sysctl_name=value - // key-value pairs. Unsafe sysctls must be explicitly enabled for a kubelet. They are properly - // namespaced to a pod or a container, but their isolation is usually unclear or weak. Their use - // is at-your-own-risk. Pods that attempt to set an unsafe sysctl that is not enabled for a kubelet - // will fail to launch. - UnsafeSysctlsPodAnnotationKey string = "security.alpha.kubernetes.io/unsafe-sysctls" - - // ObjectTTLAnnotations represents a suggestion for kubelet for how long it can cache - // an object (e.g. 
secret, config map) before fetching it again from apiserver. - // This annotation can be attached to node. - ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" - - // annotation key prefix used to identify non-convertible json paths. - NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io" - - kubectlPrefix = "kubectl.kubernetes.io/" - - // LastAppliedConfigAnnotation is the annotation used to store the previous - // configuration of a resource for use in a three way diff by UpdateApplyAnnotation. - LastAppliedConfigAnnotation = kubectlPrefix + "last-applied-configuration" - - // AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers - // - // It should be a comma-separated list of CIDRs, e.g. `0.0.0.0/0` to - // allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow - // access only from the CIDRs currently allocated to MIT & the USPS. - // - // Not all cloud providers support this annotation, though AWS & GCE do. - AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges" -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/doc.go b/vendor/k8s.io/kubernetes/pkg/api/doc.go deleted file mode 100644 index 283a83e402df..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package,register - -// Package api contains the latest (or "internal") version of the -// Kubernetes API objects. This is the API objects as represented in memory. -// The contract presented to clients is located in the versioned packages, -// which are sub-directories. The first one is "v1". Those packages -// describe how a particular version is serialized to storage/network. -package api // import "k8s.io/kubernetes/pkg/api" diff --git a/vendor/k8s.io/kubernetes/pkg/api/field_constants.go b/vendor/k8s.io/kubernetes/pkg/api/field_constants.go deleted file mode 100644 index 5ead0f13feb8..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/field_constants.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -// Field path constants that are specific to the internal API -// representation. 
-const ( - NodeUnschedulableField = "spec.unschedulable" - ObjectNameField = "metadata.name" - PodHostField = "spec.nodeName" - PodStatusField = "status.phase" - SecretTypeField = "type" - - EventReasonField = "reason" - EventSourceField = "source" - EventTypeField = "type" - EventInvolvedKindField = "involvedObject.kind" - EventInvolvedNamespaceField = "involvedObject.namespace" - EventInvolvedNameField = "involvedObject.name" - EventInvolvedUIDField = "involvedObject.uid" - EventInvolvedAPIVersionField = "involvedObject.apiVersion" - EventInvolvedResourceVersionField = "involvedObject.resourceVersion" - EventInvolvedFieldPathField = "involvedObject.fieldPath" -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/json.go b/vendor/k8s.io/kubernetes/pkg/api/json.go deleted file mode 100644 index 3a6e04c182fd..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/json.go +++ /dev/null @@ -1,28 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import "encoding/json" - -// This file implements json marshaling/unmarshaling interfaces on objects that are currently marshaled into annotations -// to prevent anyone from marshaling these internal structs. - -var _ = json.Marshaler(&AvoidPods{}) -var _ = json.Unmarshaler(&AvoidPods{}) - -func (AvoidPods) MarshalJSON() ([]byte, error) { panic("do not marshal internal struct") } -func (*AvoidPods) UnmarshalJSON([]byte) error { panic("do not unmarshal to internal struct") } diff --git a/vendor/k8s.io/kubernetes/pkg/api/objectreference.go b/vendor/k8s.io/kubernetes/pkg/api/objectreference.go deleted file mode 100644 index b36ecab27ff6..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/objectreference.go +++ /dev/null @@ -1,34 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -//TODO: consider making these methods functions, because we don't want helper -//functions in the k8s.io/api repo. 
- -package api - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" -) - -func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { - obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() -} - -func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind { - return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) -} - -func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj } diff --git a/vendor/k8s.io/kubernetes/pkg/api/register.go b/vendor/k8s.io/kubernetes/pkg/api/register.go deleted file mode 100644 index 39562e5a566f..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/register.go +++ /dev/null @@ -1,124 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "os" - - "k8s.io/apimachinery/pkg/apimachinery/announced" - "k8s.io/apimachinery/pkg/apimachinery/registered" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" -) - -// GroupFactoryRegistry is the APIGroupFactoryRegistry (overlaps a bit with Registry, see comments in package for details) -var GroupFactoryRegistry = make(announced.APIGroupFactoryRegistry) - -// Registry is an instance of an API registry. This is an interim step to start removing the idea of a global -// API registry. -var Registry = registered.NewOrDie(os.Getenv("KUBE_API_VERSIONS")) - -// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered. -// NOTE: If you are copying this file to start a new api group, STOP! Copy the -// extensions group instead. This Scheme is special and should appear ONLY in -// the api group, unless you really know what you're doing. -// TODO(lavalamp): make the above error impossible. -var Scheme = runtime.NewScheme() - -// Codecs provides access to encoding and decoding for the scheme -var Codecs = serializer.NewCodecFactory(Scheme) - -// GroupName is the group name use in this package -const GroupName = "" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} - -// ParameterCodec handles versioning of objects that are converted to query parameters. 
-var ParameterCodec = runtime.NewParameterCodec(Scheme) - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -func addKnownTypes(scheme *runtime.Scheme) error { - if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil { - return err - } - scheme.AddKnownTypes(SchemeGroupVersion, - &Pod{}, - &PodList{}, - &PodStatusResult{}, - &PodTemplate{}, - &PodTemplateList{}, - &ReplicationControllerList{}, - &ReplicationController{}, - &ServiceList{}, - &Service{}, - &ServiceProxyOptions{}, - &NodeList{}, - &Node{}, - &NodeConfigSource{}, - &NodeProxyOptions{}, - &Endpoints{}, - &EndpointsList{}, - &Binding{}, - &Event{}, - &EventList{}, - &List{}, - &LimitRange{}, - &LimitRangeList{}, - &ResourceQuota{}, - &ResourceQuotaList{}, - &Namespace{}, - &NamespaceList{}, - &ServiceAccount{}, - &ServiceAccountList{}, - &Secret{}, - &SecretList{}, - &PersistentVolume{}, - &PersistentVolumeList{}, - &PersistentVolumeClaim{}, - &PersistentVolumeClaimList{}, - &PodAttachOptions{}, - &PodLogOptions{}, - &PodExecOptions{}, - &PodPortForwardOptions{}, - &PodProxyOptions{}, - &ComponentStatus{}, - &ComponentStatusList{}, - &SerializedReference{}, - &RangeAllocation{}, - &ConfigMap{}, - &ConfigMapList{}, - ) - - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource.go b/vendor/k8s.io/kubernetes/pkg/api/resource.go deleted file mode 100644 index ce1747d8e6be..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/resource.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "k8s.io/apimachinery/pkg/api/resource" -) - -func (self ResourceName) String() string { - return string(self) -} - -// Returns the CPU limit if specified. -func (self *ResourceList) Cpu() *resource.Quantity { - if val, ok := (*self)[ResourceCPU]; ok { - return &val - } - return &resource.Quantity{Format: resource.DecimalSI} -} - -// Returns the Memory limit if specified. 
-func (self *ResourceList) Memory() *resource.Quantity { - if val, ok := (*self)[ResourceMemory]; ok { - return &val - } - return &resource.Quantity{Format: resource.BinarySI} -} - -func (self *ResourceList) Pods() *resource.Quantity { - if val, ok := (*self)[ResourcePods]; ok { - return &val - } - return &resource.Quantity{} -} - -func (self *ResourceList) NvidiaGPU() *resource.Quantity { - if val, ok := (*self)[ResourceNvidiaGPU]; ok { - return &val - } - return &resource.Quantity{} -} - -func (self *ResourceList) StorageEphemeral() *resource.Quantity { - if val, ok := (*self)[ResourceEphemeralStorage]; ok { - return &val - } - return &resource.Quantity{} -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/taint.go b/vendor/k8s.io/kubernetes/pkg/api/taint.go deleted file mode 100644 index 173e4e601614..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/taint.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -//TODO: consider making these methods functions, because we don't want helper -//functions in the k8s.io/api repo. - -package api - -import "fmt" - -// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect, -// if the two taints have same key:effect, regard as they match. -func (t *Taint) MatchTaint(taintToMatch Taint) bool { - return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect -} - -// taint.ToString() converts taint struct to string in format key=value:effect or key:effect. -func (t *Taint) ToString() string { - if len(t.Value) == 0 { - return fmt.Sprintf("%v:%v", t.Key, t.Effect) - } - return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect) -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/toleration.go b/vendor/k8s.io/kubernetes/pkg/api/toleration.go deleted file mode 100644 index edb73b74e1a6..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/toleration.go +++ /dev/null @@ -1,30 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -//TODO: consider making these methods functions, because we don't want helper -//functions in the k8s.io/api repo. - -package api - -// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by , -// if the two tolerations have same combination, regard as they match. -// TODO: uniqueness check for tolerations in api validations. 
-func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool { - return t.Key == tolerationToMatch.Key && - t.Effect == tolerationToMatch.Effect && - t.Operator == tolerationToMatch.Operator && - t.Value == tolerationToMatch.Value -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/types.go b/vendor/k8s.io/kubernetes/pkg/api/types.go deleted file mode 100644 index 92d45de61886..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/types.go +++ /dev/null @@ -1,4287 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "k8s.io/apimachinery/pkg/api/resource" - metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" -) - -// Common string formats -// --------------------- -// Many fields in this API have formatting requirements. The commonly used -// formats are defined here. -// -// C_IDENTIFIER: This is a string that conforms to the definition of an "identifier" -// in the C language. This is captured by the following regex: -// [A-Za-z_][A-Za-z0-9_]* -// This defines the format, but not the length restriction, which should be -// specified at the definition of any field of this type. -// -// DNS_LABEL: This is a string, no more than 63 characters long, that conforms -// to the definition of a "label" in RFCs 1035 and 1123. This is captured -// by the following regex: -// [a-z0-9]([-a-z0-9]*[a-z0-9])? -// -// DNS_SUBDOMAIN: This is a string, no more than 253 characters long, that conforms -// to the definition of a "subdomain" in RFCs 1035 and 1123. This is captured -// by the following regex: -// [a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)* -// or more simply: -// DNS_LABEL(\.DNS_LABEL)* -// -// IANA_SVC_NAME: This is a string, no more than 15 characters long, that -// conforms to the definition of IANA service name in RFC 6335. -// It must contains at least one letter [a-z] and it must contains only [a-z0-9-]. -// Hypens ('-') cannot be leading or trailing character of the string -// and cannot be adjacent to other hyphens. - -// ObjectMeta is metadata that all persisted resources must have, which includes all objects -// users must create. -// DEPRECATED: Use k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta instead - this type will be removed soon. -type ObjectMeta struct { - // Name is unique within a namespace. Name is required when creating resources, although - // some resources may allow a client to request the generation of an appropriate name - // automatically. Name is primarily intended for creation idempotence and configuration - // definition. - // +optional - Name string - - // GenerateName indicates that the name should be made unique by the server prior to persisting - // it. 
A non-empty value for the field indicates the name will be made unique (and the name - // returned to the client will be different than the name passed). The value of this field will - // be combined with a unique suffix on the server if the Name field has not been provided. - // The provided value must be valid within the rules for Name, and may be truncated by the length - // of the suffix required to make the value unique on the server. - // - // If this field is specified, and Name is not present, the server will NOT return a 409 if the - // generated name exists - instead, it will either return 201 Created or 500 with Reason - // ServerTimeout indicating a unique name could not be found in the time allotted, and the client - // should retry (optionally after the time indicated in the Retry-After header). - // +optional - GenerateName string - - // Namespace defines the space within which name must be unique. An empty namespace is - // equivalent to the "default" namespace, but "default" is the canonical representation. - // Not all objects are required to be scoped to a namespace - the value of this field for - // those objects will be empty. - // +optional - Namespace string - - // SelfLink is a URL representing this object. - // +optional - SelfLink string - - // UID is the unique in time and space value for this object. It is typically generated by - // the server on successful creation of a resource and is not allowed to change on PUT - // operations. - // +optional - UID types.UID - - // An opaque value that represents the version of this resource. May be used for optimistic - // concurrency, change detection, and the watch operation on a resource or set of resources. - // Clients must treat these values as opaque and values may only be valid for a particular - // resource or set of resources. Only servers will generate resource versions. - // +optional - ResourceVersion string - - // A sequence number representing a specific generation of the desired state. - // Populated by the system. Read-only. - // +optional - Generation int64 - - // CreationTimestamp is a timestamp representing the server time when this object was - // created. It is not guaranteed to be set in happens-before order across separate operations. - // Clients may not set this value. It is represented in RFC3339 form and is in UTC. - // +optional - CreationTimestamp metav1.Time - - // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This - // field is set by the server when a graceful deletion is requested by the user, and is not - // directly settable by a client. The resource is expected to be deleted (no longer visible - // from resource lists, and not reachable by name) after the time in this field. Once set, - // this value may not be unset or be set further into the future, although it may be shortened - // or the resource may be deleted prior to this time. For example, a user may request that - // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination - // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard - // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the - // API. In the presence of network partitions, this object may still exist after this - // timestamp, until an administrator or automated process can determine the resource is - // fully terminated. - // If not set, graceful deletion of the object has not been requested. 
- // - // Populated by the system when a graceful deletion is requested. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - DeletionTimestamp *metav1.Time - - // DeletionGracePeriodSeconds records the graceful deletion value set when graceful deletion - // was requested. Represents the most recent grace period, and may only be shortened once set. - // +optional - DeletionGracePeriodSeconds *int64 - - // Labels are key value pairs that may be used to scope and select individual resources. - // Label keys are of the form: - // label-key ::= prefixed-name | name - // prefixed-name ::= prefix '/' name - // prefix ::= DNS_SUBDOMAIN - // name ::= DNS_LABEL - // The prefix is optional. If the prefix is not specified, the key is assumed to be private - // to the user. Other system components that wish to use labels must specify a prefix. The - // "kubernetes.io/" prefix is reserved for use by kubernetes components. - // +optional - Labels map[string]string - - // Annotations are unstructured key value data stored with a resource that may be set by - // external tooling. They are not queryable and should be preserved when modifying - // objects. Annotation keys have the same formatting restrictions as Label keys. See the - // comments on Labels for details. - // +optional - Annotations map[string]string - - // List of objects depended by this object. If ALL objects in the list have - // been deleted, this object will be garbage collected. If this object is managed by a controller, - // then an entry in this list will point to this controller, with the controller field set to true. - // There cannot be more than one managing controller. - // +optional - OwnerReferences []metav1.OwnerReference - - // An initializer is a controller which enforces some system invariant at object creation time. - // This field is a list of initializers that have not yet acted on this object. If nil or empty, - // this object has been completely initialized. Otherwise, the object is considered uninitialized - // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to - // observe uninitialized objects. - // - // When an object is created, the system will populate this list with the current set of initializers. - // Only privileged users may set or modify this list. Once it is empty, it may not be modified further - // by any user. - Initializers *metav1.Initializers - - // Must be empty before the object is deleted from the registry. Each entry - // is an identifier for the responsible component that will remove the entry - // from the list. If the deletionTimestamp of the object is non-nil, entries - // in this list can only be removed. - // +optional - Finalizers []string - - // The name of the cluster which the object belongs to. - // This is used to distinguish resources with same name and namespace in different clusters. - // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. - // +optional - ClusterName string -} - -const ( - // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients - NamespaceDefault string = "default" - // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces - NamespaceAll string = "" - // NamespaceNone is the argument for a context when there is no namespace. 
- NamespaceNone string = "" - // NamespaceSystem is the system namespace where we place system components. - NamespaceSystem string = "kube-system" - // NamespacePublic is the namespace where we place public info (ConfigMaps) - NamespacePublic string = "kube-public" - // TerminationMessagePathDefault means the default path to capture the application termination message running in a container - TerminationMessagePathDefault string = "/dev/termination-log" -) - -// Volume represents a named volume in a pod that may be accessed by any containers in the pod. -type Volume struct { - // Required: This must be a DNS_LABEL. Each volume in a pod must have - // a unique name. - Name string - // The VolumeSource represents the location and type of a volume to mount. - // This is optional for now. If not specified, the Volume is implied to be an EmptyDir. - // This implied behavior is deprecated and will be removed in a future version. - // +optional - VolumeSource -} - -// VolumeSource represents the source location of a volume to mount. -// Only one of its members may be specified. -type VolumeSource struct { - // HostPath represents file or directory on the host machine that is - // directly exposed to the container. This is generally used for system - // agents or other privileged things that are allowed to see the host - // machine. Most containers will NOT need this. - // --- - // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not - // mount host directories as read/write. - // +optional - HostPath *HostPathVolumeSource - // EmptyDir represents a temporary directory that shares a pod's lifetime. - // +optional - EmptyDir *EmptyDirVolumeSource - // GCEPersistentDisk represents a GCE Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - GCEPersistentDisk *GCEPersistentDiskVolumeSource - // AWSElasticBlockStore represents an AWS EBS disk that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource - // GitRepo represents a git repository at a particular revision. - // +optional - GitRepo *GitRepoVolumeSource - // Secret represents a secret that should populate this volume. - // +optional - Secret *SecretVolumeSource - // NFS represents an NFS mount on the host that shares a pod's lifetime - // +optional - NFS *NFSVolumeSource - // ISCSIVolumeSource represents an ISCSI Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - ISCSI *ISCSIVolumeSource - // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime - // +optional - Glusterfs *GlusterfsVolumeSource - // PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace - // +optional - PersistentVolumeClaim *PersistentVolumeClaimVolumeSource - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime - // +optional - RBD *RBDVolumeSource - - // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime - // +optional - Quobyte *QuobyteVolumeSource - - // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. 
- // +optional - FlexVolume *FlexVolumeSource - - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // +optional - Cinder *CinderVolumeSource - - // CephFS represents a Cephfs mount on the host that shares a pod's lifetime - // +optional - CephFS *CephFSVolumeSource - - // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running - // +optional - Flocker *FlockerVolumeSource - - // DownwardAPI represents metadata about the pod that should populate this volume - // +optional - DownwardAPI *DownwardAPIVolumeSource - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - // +optional - FC *FCVolumeSource - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. - // +optional - AzureFile *AzureFileVolumeSource - // ConfigMap represents a configMap that should populate this volume - // +optional - ConfigMap *ConfigMapVolumeSource - // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine - // +optional - VsphereVolume *VsphereVirtualDiskVolumeSource - // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. - // +optional - AzureDisk *AzureDiskVolumeSource - // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine - PhotonPersistentDisk *PhotonPersistentDiskVolumeSource - // Items for all in one resources secrets, configmaps, and downward API - Projected *ProjectedVolumeSource - // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine - // +optional - PortworxVolume *PortworxVolumeSource - // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. - // +optional - ScaleIO *ScaleIOVolumeSource - // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod - // +optional - StorageOS *StorageOSVolumeSource -} - -// Similar to VolumeSource but meant for the administrator who creates PVs. -// Exactly one of its members must be set. -type PersistentVolumeSource struct { - // GCEPersistentDisk represents a GCE Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - GCEPersistentDisk *GCEPersistentDiskVolumeSource - // AWSElasticBlockStore represents an AWS EBS disk that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource - // HostPath represents a directory on the host. - // Provisioned by a developer or tester. - // This is useful for single-node development and testing only! - // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. 
- // +optional - HostPath *HostPathVolumeSource - // Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod - // +optional - Glusterfs *GlusterfsVolumeSource - // NFS represents an NFS mount on the host that shares a pod's lifetime - // +optional - NFS *NFSVolumeSource - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime - // +optional - RBD *RBDVolumeSource - // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime - // +optional - Quobyte *QuobyteVolumeSource - // ISCSIVolumeSource represents an ISCSI resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // +optional - ISCSI *ISCSIVolumeSource - // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. - // +optional - FlexVolume *FlexVolumeSource - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // +optional - Cinder *CinderVolumeSource - // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime - // +optional - CephFS *CephFSPersistentVolumeSource - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - // +optional - FC *FCVolumeSource - // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running - // +optional - Flocker *FlockerVolumeSource - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. - // +optional - AzureFile *AzureFilePersistentVolumeSource - // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine - // +optional - VsphereVolume *VsphereVirtualDiskVolumeSource - // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. - // +optional - AzureDisk *AzureDiskVolumeSource - // PhotonPersistentDisk represents a Photon Controller persistent disk attached and mounted on kubelets host machine - PhotonPersistentDisk *PhotonPersistentDiskVolumeSource - // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine - // +optional - PortworxVolume *PortworxVolumeSource - // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. - // +optional - ScaleIO *ScaleIOVolumeSource - // Local represents directly-attached storage with node affinity - // +optional - Local *LocalVolumeSource - // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod - // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md - // +optional - StorageOS *StorageOSPersistentVolumeSource -} - -type PersistentVolumeClaimVolumeSource struct { - // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume - ClaimName string - // Optional: Defaults to false (read/write). ReadOnly here - // will force the ReadOnly setting in VolumeMounts - // +optional - ReadOnly bool -} - -const ( - // BetaStorageClassAnnotation represents the beta/previous StorageClass annotation. 
- // It's currently still used and will be held for backwards compatibility - BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class" - - // MountOptionAnnotation defines mount option annotation used in PVs - MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options" - - // AlphaStorageNodeAffinityAnnotation defines node affinity policies for a PersistentVolume. - // Value is a string of the json representation of type NodeAffinity - AlphaStorageNodeAffinityAnnotation = "volume.alpha.kubernetes.io/node-affinity" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type PersistentVolume struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - //Spec defines a persistent volume owned by the cluster - // +optional - Spec PersistentVolumeSpec - - // Status represents the current information about persistent volume. - // +optional - Status PersistentVolumeStatus -} - -type PersistentVolumeSpec struct { - // Resources represents the actual resources of the volume - Capacity ResourceList - // Source represents the location and type of a volume to mount. - PersistentVolumeSource - // AccessModes contains all ways the volume can be mounted - // +optional - AccessModes []PersistentVolumeAccessMode - // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. - // ClaimRef is expected to be non-nil when bound. - // claim.VolumeName is the authoritative bind between PV and PVC. - // When set to non-nil value, PVC.Spec.Selector of the referenced PVC is - // ignored, i.e. labels of this PV do not need to match PVC selector. - // +optional - ClaimRef *ObjectReference - // Optional: what happens to a persistent volume when released from its claim. - // +optional - PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy - // Name of StorageClass to which this persistent volume belongs. Empty value - // means that this volume does not belong to any StorageClass. - // +optional - StorageClassName string - // A list of mount options, e.g. ["ro", "soft"]. Not validated - mount will - // simply fail if one is invalid. - // +optional - MountOptions []string -} - -// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes -type PersistentVolumeReclaimPolicy string - -const ( - // PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim. - // The volume plugin must support Recycling. - PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle" - // PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim. - // The volume plugin must support Deletion. - PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete" - // PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator. - // The default policy is Retain. - PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" -) - -type PersistentVolumeStatus struct { - // Phase indicates if a volume is available, bound to a claim, or released by a claim - // +optional - Phase PersistentVolumePhase - // A human-readable message indicating details about why the volume is in this state. 
- // +optional - Message string - // Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI - // +optional - Reason string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type PersistentVolumeList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - Items []PersistentVolume -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PersistentVolumeClaim is a user's request for and claim to a persistent volume -type PersistentVolumeClaim struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the volume requested by a pod author - // +optional - Spec PersistentVolumeClaimSpec - - // Status represents the current information about a claim - // +optional - Status PersistentVolumeClaimStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type PersistentVolumeClaimList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - Items []PersistentVolumeClaim -} - -// PersistentVolumeClaimSpec describes the common attributes of storage devices -// and allows a Source for provider-specific attributes -type PersistentVolumeClaimSpec struct { - // Contains the types of access modes required - // +optional - AccessModes []PersistentVolumeAccessMode - // A label query over volumes to consider for binding. This selector is - // ignored when VolumeName is set - // +optional - Selector *metav1.LabelSelector - // Resources represents the minimum resources required - // +optional - Resources ResourceRequirements - // VolumeName is the binding reference to the PersistentVolume backing this - // claim. When set to non-empty value Selector is not evaluated - // +optional - VolumeName string - // Name of the StorageClass required by the claim. 
- // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1 - // +optional - StorageClassName *string -} - -type PersistentVolumeClaimConditionType string - -// These are valid conditions of Pvc -const ( - // An user trigger resize of pvc has been started - PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing" -) - -type PersistentVolumeClaimCondition struct { - Type PersistentVolumeClaimConditionType - Status ConditionStatus - // +optional - LastProbeTime metav1.Time - // +optional - LastTransitionTime metav1.Time - // +optional - Reason string - // +optional - Message string -} - -type PersistentVolumeClaimStatus struct { - // Phase represents the current phase of PersistentVolumeClaim - // +optional - Phase PersistentVolumeClaimPhase - // AccessModes contains all ways the volume backing the PVC can be mounted - // +optional - AccessModes []PersistentVolumeAccessMode - // Represents the actual resources of the underlying volume - // +optional - Capacity ResourceList - // +optional - Conditions []PersistentVolumeClaimCondition -} - -type PersistentVolumeAccessMode string - -const ( - // can be mounted read/write mode to exactly 1 host - ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce" - // can be mounted in read-only mode to many hosts - ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany" - // can be mounted in read/write mode to many hosts - ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany" -) - -type PersistentVolumePhase string - -const ( - // used for PersistentVolumes that are not available - VolumePending PersistentVolumePhase = "Pending" - // used for PersistentVolumes that are not yet bound - // Available volumes are held by the binder and matched to PersistentVolumeClaims - VolumeAvailable PersistentVolumePhase = "Available" - // used for PersistentVolumes that are bound - VolumeBound PersistentVolumePhase = "Bound" - // used for PersistentVolumes where the bound PersistentVolumeClaim was deleted - // released volumes must be recycled before becoming available again - // this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource - VolumeReleased PersistentVolumePhase = "Released" - // used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim - VolumeFailed PersistentVolumePhase = "Failed" -) - -type PersistentVolumeClaimPhase string - -const ( - // used for PersistentVolumeClaims that are not yet bound - ClaimPending PersistentVolumeClaimPhase = "Pending" - // used for PersistentVolumeClaims that are bound - ClaimBound PersistentVolumeClaimPhase = "Bound" - // used for PersistentVolumeClaims that lost their underlying - // PersistentVolume. The claim was bound to a PersistentVolume and this - // volume does not exist any longer and all data on it was lost. - ClaimLost PersistentVolumeClaimPhase = "Lost" -) - -type HostPathType string - -const ( - // For backwards compatible, leave it empty if unset - HostPathUnset HostPathType = "" - // If nothing exists at the given path, an empty directory will be created there - // as needed with file mode 0755, having the same group and ownership with Kubelet. 
- HostPathDirectoryOrCreate HostPathType = "DirectoryOrCreate" - // A directory must exist at the given path - HostPathDirectory HostPathType = "Directory" - // If nothing exists at the given path, an empty file will be created there - // as needed with file mode 0644, having the same group and ownership with Kubelet. - HostPathFileOrCreate HostPathType = "FileOrCreate" - // A file must exist at the given path - HostPathFile HostPathType = "File" - // A UNIX socket must exist at the given path - HostPathSocket HostPathType = "Socket" - // A character device must exist at the given path - HostPathCharDev HostPathType = "CharDevice" - // A block device must exist at the given path - HostPathBlockDev HostPathType = "BlockDevice" -) - -// Represents a host path mapped into a pod. -// Host path volumes do not support ownership management or SELinux relabeling. -type HostPathVolumeSource struct { - // If the path is a symlink, it will follow the link to the real path. - Path string - // Defaults to "" - Type *HostPathType -} - -// Represents an empty directory for a pod. -// Empty directory volumes support ownership management and SELinux relabeling. -type EmptyDirVolumeSource struct { - // TODO: Longer term we want to represent the selection of underlying - // media more like a scheduling problem - user says what traits they - // need, we give them a backing store that satisfies that. For now - // this will cover the most common needs. - // Optional: what type of storage medium should back this directory. - // The default is "" which means to use the node's default medium. - // +optional - Medium StorageMedium - // Total amount of local storage required for this EmptyDir volume. - // The size limit is also applicable for memory medium. - // The maximum usage on memory medium EmptyDir would be the minimum value between - // the SizeLimit specified here and the sum of memory limits of all containers in a pod. - // The default is nil which means that the limit is undefined. - // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir - // +optional - SizeLimit *resource.Quantity -} - -// StorageMedium defines ways that storage can be allocated to a volume. -type StorageMedium string - -const ( - StorageMediumDefault StorageMedium = "" // use whatever the default is for the node - StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs) - StorageMediumHugePages StorageMedium = "HugePages" // use hugepages -) - -// Protocol defines network protocols supported for things like container ports. -type Protocol string - -const ( - // ProtocolTCP is the TCP protocol. - ProtocolTCP Protocol = "TCP" - // ProtocolUDP is the UDP protocol. - ProtocolUDP Protocol = "UDP" -) - -// Represents a Persistent Disk resource in Google Compute Engine. -// -// A GCE PD must exist before mounting to a container. The disk must -// also be in the same GCE project and zone as the kubelet. A GCE PD -// can only be mounted as read/write once or read-only many times. GCE -// PDs support ownership management and SELinux relabeling. -type GCEPersistentDiskVolumeSource struct { - // Unique name of the PD resource. Used to identify the disk in GCE - PDName string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: Partition on the disk to mount. 
- // If omitted, kubelet will attempt to mount the device name. - // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. - // +optional - Partition int32 - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents an ISCSI disk. -// ISCSI volumes can only be mounted as read/write once. -// ISCSI volumes support ownership management and SELinux relabeling. -type ISCSIVolumeSource struct { - // Required: iSCSI target portal - // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) - // +optional - TargetPortal string - // Required: target iSCSI Qualified Name - // +optional - IQN string - // Required: iSCSI target lun number - // +optional - Lun int32 - // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. - // +optional - ISCSIInterface string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // Optional: list of iSCSI target portal ips for high availability. - // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) - // +optional - Portals []string - // Optional: whether support iSCSI Discovery CHAP authentication - // +optional - DiscoveryCHAPAuth bool - // Optional: whether support iSCSI Session CHAP authentication - // +optional - SessionCHAPAuth bool - // Optional: CHAP secret for iSCSI target and initiator authentication. - // The secret is used if either DiscoveryCHAPAuth or SessionCHAPAuth is true - // +optional - SecretRef *LocalObjectReference - // Optional: Custom initiator name per volume. - // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface - // : will be created for the connection. - // +optional - InitiatorName *string -} - -// Represents a Fibre Channel volume. -// Fibre Channel volumes can only be mounted as read/write once. -// Fibre Channel volumes support ownership management and SELinux relabeling. -type FCVolumeSource struct { - // Optional: FC target worldwide names (WWNs) - // +optional - TargetWWNs []string - // Optional: FC target lun number - // +optional - Lun *int32 - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // Optional: FC volume World Wide Identifiers (WWIDs) - // Either WWIDs or TargetWWNs and Lun must be set, but not both simultaneously. - // +optional - WWIDs []string -} - -// FlexVolume represents a generic volume resource that is -// provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. -type FlexVolumeSource struct { - // Driver is the name of the driver to use for this volume. 
- Driver string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. - // +optional - FSType string - // Optional: SecretRef is reference to the secret object containing - // sensitive information to pass to the plugin scripts. This may be - // empty if no secret object is specified. If the secret object - // contains more than one secret, all secrets are passed to the plugin - // scripts. - // +optional - SecretRef *LocalObjectReference - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // Optional: Extra driver options if any. - // +optional - Options map[string]string -} - -// Represents a Persistent Disk resource in AWS. -// -// An AWS EBS disk must exist before mounting to a container. The disk -// must also be in the same AWS zone as the kubelet. An AWS EBS disk -// can only be mounted as read/write once. AWS EBS volumes support -// ownership management and SELinux relabeling. -type AWSElasticBlockStoreVolumeSource struct { - // Unique id of the persistent disk resource. Used to identify the disk in AWS - VolumeID string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: Partition on the disk to mount. - // If omitted, kubelet will attempt to mount the device name. - // Ex. For /dev/sda1, this field is "1", for /dev/sda, this field is 0 or empty. - // +optional - Partition int32 - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents a volume that is populated with the contents of a git repository. -// Git repo volumes do not support ownership management. -// Git repo volumes support SELinux relabeling. -type GitRepoVolumeSource struct { - // Repository URL - Repository string - // Commit hash, this is optional - // +optional - Revision string - // Clone target, this is optional - // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the - // git repository. Otherwise, if specified, the volume will contain the git repository in - // the subdirectory with the given name. - // +optional - Directory string - // TODO: Consider credentials here. -} - -// Adapts a Secret into a volume. -// -// The contents of the target Secret's Data field will be presented in a volume -// as files using the keys in the Data field as the file names. -// Secret volumes support ownership management and SELinux relabeling. -type SecretVolumeSource struct { - // Name of the secret in the pod's namespace to use. - // +optional - SecretName string - // If unspecified, each key-value pair in the Data field of the referenced - // Secret will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the Secret, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. 
- // +optional - Items []KeyToPath - // Mode bits to use on created files by default. Must be a value between - // 0 and 0777. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - DefaultMode *int32 - // Specify whether the Secret or its key must be defined - // +optional - Optional *bool -} - -// Adapts a secret into a projected volume. -// -// The contents of the target Secret's Data field will be presented in a -// projected volume as files using the keys in the Data field as the file names. -// Note that this is identical to a secret volume source without the default -// mode. -type SecretProjection struct { - LocalObjectReference - // If unspecified, each key-value pair in the Data field of the referenced - // Secret will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the Secret, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - Items []KeyToPath - // Specify whether the Secret or its key must be defined - // +optional - Optional *bool -} - -// Represents an NFS mount that lasts the lifetime of a pod. -// NFS volumes do not support ownership management or SELinux relabeling. -type NFSVolumeSource struct { - // Server is the hostname or IP address of the NFS server - Server string - - // Path is the exported NFS share - Path string - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the NFS export to be mounted with read-only permissions - // +optional - ReadOnly bool -} - -// Represents a Quobyte mount that lasts the lifetime of a pod. -// Quobyte volumes do not support ownership management or SELinux relabeling. -type QuobyteVolumeSource struct { - // Registry represents a single or multiple Quobyte Registry services - // specified as a string as host:port pair (multiple entries are separated with commas) - // which acts as the central registry for volumes - Registry string - - // Volume is a string that references an already created Quobyte volume by name. - Volume string - - // Defaults to false (read/write). ReadOnly here will force - // the Quobyte to be mounted with read-only permissions - // +optional - ReadOnly bool - - // User to map volume access to - // Defaults to the root user - // +optional - User string - - // Group to map volume access to - // Default is no group - // +optional - Group string -} - -// Represents a Glusterfs mount that lasts the lifetime of a pod. -// Glusterfs volumes do not support ownership management or SELinux relabeling. -type GlusterfsVolumeSource struct { - // Required: EndpointsName is the endpoint name that details Glusterfs topology - EndpointsName string - - // Required: Path is the Glusterfs volume path - Path string - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the Glusterfs to be mounted with read-only permissions - // +optional - ReadOnly bool -} - -// Represents a Rados Block Device mount that lasts the lifetime of a pod. -// RBD volumes support ownership management and SELinux relabeling. 
-type RBDVolumeSource struct { - // Required: CephMonitors is a collection of Ceph monitors - CephMonitors []string - // Required: RBDImage is the rados image name - RBDImage string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string - // Optional: RadosPool is the rados pool name,default is rbd - // +optional - RBDPool string - // Optional: RBDUser is the rados user name, default is admin - // +optional - RadosUser string - // Optional: Keyring is the path to key ring for RBDUser, default is /etc/ceph/keyring - // +optional - Keyring string - // Optional: SecretRef is name of the authentication secret for RBDUser, default is nil. - // +optional - SecretRef *LocalObjectReference - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents a cinder volume resource in Openstack. A Cinder volume -// must exist before mounting to a container. The volume must also be -// in the same region as the kubelet. Cinder volumes support ownership -// management and SELinux relabeling. -type CinderVolumeSource struct { - // Unique id of the volume used to identify the cinder volume - VolumeID string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents a Ceph Filesystem mount that lasts the lifetime of a pod -// Cephfs volumes do not support ownership management or SELinux relabeling. -type CephFSVolumeSource struct { - // Required: Monitors is a collection of Ceph monitors - Monitors []string - // Optional: Used as the mounted root, rather than the full Ceph tree, default is / - // +optional - Path string - // Optional: User is the rados user name, default is admin - // +optional - User string - // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // +optional - SecretFile string - // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // +optional - SecretRef *LocalObjectReference - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// SecretReference represents a Secret Reference. It has enough information to retrieve secret -// in any namespace -type SecretReference struct { - // Name is unique within a namespace to reference a secret resource. - // +optional - Name string - // Namespace defines the space within which the secret name must be unique. - // +optional - Namespace string -} - -// Represents a Ceph Filesystem mount that lasts the lifetime of a pod -// Cephfs volumes do not support ownership management or SELinux relabeling. 
-type CephFSPersistentVolumeSource struct { - // Required: Monitors is a collection of Ceph monitors - Monitors []string - // Optional: Used as the mounted root, rather than the full Ceph tree, default is / - // +optional - Path string - // Optional: User is the rados user name, default is admin - // +optional - User string - // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // +optional - SecretFile string - // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // +optional - SecretRef *SecretReference - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents a Flocker volume mounted by the Flocker agent. -// One and only one of datasetName and datasetUUID should be set. -// Flocker volumes do not support ownership management or SELinux relabeling. -type FlockerVolumeSource struct { - // Name of the dataset stored as metadata -> name on the dataset for Flocker - // should be considered as deprecated - // +optional - DatasetName string - // UUID of the dataset. This is unique identifier of a Flocker dataset - // +optional - DatasetUUID string -} - -// Represents a volume containing downward API info. -// Downward API volumes support ownership management and SELinux relabeling. -type DownwardAPIVolumeSource struct { - // Items is a list of DownwardAPIVolume file - // +optional - Items []DownwardAPIVolumeFile - // Mode bits to use on created files by default. Must be a value between - // 0 and 0777. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - DefaultMode *int32 -} - -// Represents a single file containing information from the downward API -type DownwardAPIVolumeFile struct { - // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' - Path string - // Required: Selects a field of the pod: only annotations, labels, name, namespace and uid are supported. - // +optional - FieldRef *ObjectFieldSelector - // Selects a resource of the container: only resources limits and requests - // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - // +optional - ResourceFieldRef *ResourceFieldSelector - // Optional: mode bits to use on this file, must be a value between 0 - // and 0777. If not specified, the volume defaultMode will be used. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - Mode *int32 -} - -// Represents downward API info for projecting into a projected volume. -// Note that this is identical to a downwardAPI volume source without the default -// mode. -type DownwardAPIProjection struct { - // Items is a list of DownwardAPIVolume file - // +optional - Items []DownwardAPIVolumeFile -} - -// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. -type AzureFileVolumeSource struct { - // the name of secret that contains Azure Storage Account Name and Key - SecretName string - // Share Name - ShareName string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. 
- // +optional - ReadOnly bool -} - -// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. -type AzureFilePersistentVolumeSource struct { - // the name of secret that contains Azure Storage Account Name and Key - SecretName string - // Share Name - ShareName string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // the namespace of the secret that contains Azure Storage Account Name and Key - // default is the same as the Pod - // +optional - SecretNamespace *string -} - -// Represents a vSphere volume resource. -type VsphereVirtualDiskVolumeSource struct { - // Path that identifies vSphere volume vmdk - VolumePath string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Storage Policy Based Management (SPBM) profile name. - // +optional - StoragePolicyName string - // Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. - // +optional - StoragePolicyID string -} - -// Represents a Photon Controller persistent disk resource. -type PhotonPersistentDiskVolumeSource struct { - // ID that identifies Photon Controller persistent disk - PdID string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - FSType string -} - -// PortworxVolumeSource represents a Portworx volume resource. -type PortworxVolumeSource struct { - // VolumeID uniquely identifies a Portworx volume - VolumeID string - // FSType represents the filesystem type to mount - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -type AzureDataDiskCachingMode string -type AzureDataDiskKind string - -const ( - AzureDataDiskCachingNone AzureDataDiskCachingMode = "None" - AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly" - AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite" - - AzureSharedBlobDisk AzureDataDiskKind = "Shared" - AzureDedicatedBlobDisk AzureDataDiskKind = "Dedicated" - AzureManagedDisk AzureDataDiskKind = "Managed" -) - -// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. -type AzureDiskVolumeSource struct { - // The Name of the data disk in the blob storage - DiskName string - // The URI of the data disk in the blob storage - DataDiskURI string - // Host Caching mode: None, Read Only, Read Write. - // +optional - CachingMode *AzureDataDiskCachingMode - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType *string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly *bool - // Expected values Shared: mulitple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). 
defaults to shared - Kind *AzureDataDiskKind -} - -// ScaleIOVolumeSource represents a persistent ScaleIO volume -type ScaleIOVolumeSource struct { - // The host address of the ScaleIO API Gateway. - Gateway string - // The name of the storage system as configured in ScaleIO. - System string - // SecretRef references to the secret for ScaleIO user and other - // sensitive information. If this is not provided, Login operation will fail. - SecretRef *LocalObjectReference - // Flag to enable/disable SSL communication with Gateway, default false - // +optional - SSLEnabled bool - // The name of the Protection Domain for the configured storage (defaults to "default"). - // +optional - ProtectionDomain string - // The Storage Pool associated with the protection domain (defaults to "default"). - // +optional - StoragePool string - // Indicates whether the storage for a volume should be thick or thin (defaults to "thin"). - // +optional - StorageMode string - // The name of a volume already created in the ScaleIO system - // that is associated with this volume source. - VolumeName string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool -} - -// Represents a StorageOS persistent volume resource. -type StorageOSVolumeSource struct { - // VolumeName is the human-readable name of the StorageOS volume. Volume - // names are only unique within a namespace. - VolumeName string - // VolumeNamespace specifies the scope of the volume within StorageOS. If no - // namespace is specified then the Pod's namespace will be used. This allows the - // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. - // Set VolumeName to any name to override the default behaviour. - // Set to "default" if you are not using namespaces within StorageOS. - // Namespaces that do not pre-exist within StorageOS will be created. - // +optional - VolumeNamespace string - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // SecretRef specifies the secret to use for obtaining the StorageOS API - // credentials. If not specified, default values will be attempted. - // +optional - SecretRef *LocalObjectReference -} - -// Represents a StorageOS persistent volume resource. -type StorageOSPersistentVolumeSource struct { - // VolumeName is the human-readable name of the StorageOS volume. Volume - // names are only unique within a namespace. - VolumeName string - // VolumeNamespace specifies the scope of the volume within StorageOS. If no - // namespace is specified then the Pod's namespace will be used. This allows the - // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. - // Set VolumeName to any name to override the default behaviour. - // Set to "default" if you are not using namespaces within StorageOS. - // Namespaces that do not pre-exist within StorageOS will be created. - // +optional - VolumeNamespace string - // Filesystem type to mount. 
- // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool - // SecretRef specifies the secret to use for obtaining the StorageOS API - // credentials. If not specified, default values will be attempted. - // +optional - SecretRef *ObjectReference -} - -// Adapts a ConfigMap into a volume. -// -// The contents of the target ConfigMap's Data field will be presented in a -// volume as files using the keys in the Data field as the file names, unless -// the items element is populated with specific mappings of keys to paths. -// ConfigMap volumes support ownership management and SELinux relabeling. -type ConfigMapVolumeSource struct { - LocalObjectReference - // If unspecified, each key-value pair in the Data field of the referenced - // ConfigMap will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the ConfigMap, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - Items []KeyToPath - // Mode bits to use on created files by default. Must be a value between - // 0 and 0777. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - DefaultMode *int32 - // Specify whether the ConfigMap or it's keys must be defined - // +optional - Optional *bool -} - -// Adapts a ConfigMap into a projected volume. -// -// The contents of the target ConfigMap's Data field will be presented in a -// projected volume as files using the keys in the Data field as the file names, -// unless the items element is populated with specific mappings of keys to paths. -// Note that this is identical to a configmap volume source without the default -// mode. -type ConfigMapProjection struct { - LocalObjectReference - // If unspecified, each key-value pair in the Data field of the referenced - // ConfigMap will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the ConfigMap, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - Items []KeyToPath - // Specify whether the ConfigMap or it's keys must be defined - // +optional - Optional *bool -} - -// Represents a projected volume source -type ProjectedVolumeSource struct { - // list of volume projections - Sources []VolumeProjection - // Mode bits to use on created files by default. Must be a value between - // 0 and 0777. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. 
- // +optional - DefaultMode *int32 -} - -// Projection that may be projected along with other supported volume types -type VolumeProjection struct { - // all types below are the supported types for projection into the same volume - - // information about the secret data to project - Secret *SecretProjection - // information about the downwardAPI data to project - DownwardAPI *DownwardAPIProjection - // information about the configMap data to project - ConfigMap *ConfigMapProjection -} - -// Maps a string key to a path within a volume. -type KeyToPath struct { - // The key to project. - Key string - - // The relative path of the file to map the key to. - // May not be an absolute path. - // May not contain the path element '..'. - // May not start with the string '..'. - Path string - // Optional: mode bits to use on this file, should be a value between 0 - // and 0777. If not specified, the volume defaultMode will be used. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - Mode *int32 -} - -// Local represents directly-attached storage with node affinity -type LocalVolumeSource struct { - // The full path to the volume on the node - // For alpha, this path must be a directory - // Once block as a source is supported, then this path can point to a block device - Path string -} - -// ContainerPort represents a network port in a single container -type ContainerPort struct { - // Optional: If specified, this must be an IANA_SVC_NAME Each named port - // in a pod must have a unique name. - // +optional - Name string - // Optional: If specified, this must be a valid port number, 0 < x < 65536. - // If HostNetwork is specified, this must match ContainerPort. - // +optional - HostPort int32 - // Required: This must be a valid port number, 0 < x < 65536. - ContainerPort int32 - // Required: Supports "TCP" and "UDP". - // +optional - Protocol Protocol - // Optional: What host IP to bind the external port to. - // +optional - HostIP string -} - -// VolumeMount describes a mounting of a Volume within a container. -type VolumeMount struct { - // Required: This must match the Name of a Volume [above]. - Name string - // Optional: Defaults to false (read-write). - // +optional - ReadOnly bool - // Required. Must not contain ':'. - MountPath string - // Path within the volume from which the container's volume should be mounted. - // Defaults to "" (volume's root). - // +optional - SubPath string - // mountPropagation determines how mounts are propagated from the host - // to container and the other way around. - // When not set, MountPropagationHostToContainer is used. - // This field is alpha in 1.8 and can be reworked or removed in a future - // release. - // +optional - MountPropagation *MountPropagationMode -} - -// MountPropagationMode describes mount propagation. -type MountPropagationMode string - -const ( - // MountPropagationHostToContainer means that the volume in a container will - // receive new mounts from the host or other containers, but filesystems - // mounted inside the container won't be propagated to the host or other - // containers. - // Note that this mode is recursively applied to all mounts in the volume - // ("rslave" in Linux terminology). 
- MountPropagationHostToContainer MountPropagationMode = "HostToContainer" - // MountPropagationBidirectional means that the volume in a container will - // receive new mounts from the host or other containers, and its own mounts - // will be propagated from the container to the host or other containers. - // Note that this mode is recursively applied to all mounts in the volume - // ("rshared" in Linux terminology). - MountPropagationBidirectional MountPropagationMode = "Bidirectional" -) - -// EnvVar represents an environment variable present in a Container. -type EnvVar struct { - // Required: This must be a C_IDENTIFIER. - Name string - // Optional: no more than one of the following may be specified. - // Optional: Defaults to ""; variable references $(VAR_NAME) are expanded - // using the previous defined environment variables in the container and - // any service environment variables. If a variable cannot be resolved, - // the reference in the input string will be unchanged. The $(VAR_NAME) - // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped - // references will never be expanded, regardless of whether the variable - // exists or not. - // +optional - Value string - // Optional: Specifies a source the value of this var should come from. - // +optional - ValueFrom *EnvVarSource -} - -// EnvVarSource represents a source for the value of an EnvVar. -// Only one of its fields may be set. -type EnvVarSource struct { - // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, - // metadata.uid, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP. - // +optional - FieldRef *ObjectFieldSelector - // Selects a resource of the container: only resources limits and requests - // (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - // +optional - ResourceFieldRef *ResourceFieldSelector - // Selects a key of a ConfigMap. - // +optional - ConfigMapKeyRef *ConfigMapKeySelector - // Selects a key of a secret in the pod's namespace. - // +optional - SecretKeyRef *SecretKeySelector -} - -// ObjectFieldSelector selects an APIVersioned field of an object. -type ObjectFieldSelector struct { - // Required: Version of the schema the FieldPath is written in terms of. - // If no value is specified, it will be defaulted to the APIVersion of the - // enclosing object. - APIVersion string - // Required: Path of the field to select in the specified API version - FieldPath string -} - -// ResourceFieldSelector represents container resources (cpu, memory) and their output format -type ResourceFieldSelector struct { - // Container name: required for volumes, optional for env vars - // +optional - ContainerName string - // Required: resource to select - Resource string - // Specifies the output format of the exposed resources, defaults to "1" - // +optional - Divisor resource.Quantity -} - -// Selects a key from a ConfigMap. -type ConfigMapKeySelector struct { - // The ConfigMap to select from. - LocalObjectReference - // The key to select. - Key string - // Specify whether the ConfigMap or it's key must be defined - // +optional - Optional *bool -} - -// SecretKeySelector selects a key of a Secret. -type SecretKeySelector struct { - // The name of the secret in the pod's namespace to select from. - LocalObjectReference - // The key of the secret to select from. Must be a valid secret key. 
- Key string - // Specify whether the Secret or it's key must be defined - // +optional - Optional *bool -} - -// EnvFromSource represents the source of a set of ConfigMaps -type EnvFromSource struct { - // An optional identifier to prepend to each key in the ConfigMap. - // +optional - Prefix string - // The ConfigMap to select from. - //+optional - ConfigMapRef *ConfigMapEnvSource - // The Secret to select from. - //+optional - SecretRef *SecretEnvSource -} - -// ConfigMapEnvSource selects a ConfigMap to populate the environment -// variables with. -// -// The contents of the target ConfigMap's Data field will represent the -// key-value pairs as environment variables. -type ConfigMapEnvSource struct { - // The ConfigMap to select from. - LocalObjectReference - // Specify whether the ConfigMap must be defined - // +optional - Optional *bool -} - -// SecretEnvSource selects a Secret to populate the environment -// variables with. -// -// The contents of the target Secret's Data field will represent the -// key-value pairs as environment variables. -type SecretEnvSource struct { - // The Secret to select from. - LocalObjectReference - // Specify whether the Secret must be defined - // +optional - Optional *bool -} - -// HTTPHeader describes a custom header to be used in HTTP probes -type HTTPHeader struct { - // The header field name - Name string - // The header field value - Value string -} - -// HTTPGetAction describes an action based on HTTP Get requests. -type HTTPGetAction struct { - // Optional: Path to access on the HTTP server. - // +optional - Path string - // Required: Name or number of the port to access on the container. - // +optional - Port intstr.IntOrString - // Optional: Host name to connect to, defaults to the pod IP. You - // probably want to set "Host" in httpHeaders instead. - // +optional - Host string - // Optional: Scheme to use for connecting to the host, defaults to HTTP. - // +optional - Scheme URIScheme - // Optional: Custom headers to set in the request. HTTP allows repeated headers. - // +optional - HTTPHeaders []HTTPHeader -} - -// URIScheme identifies the scheme used for connection to a host for Get actions -type URIScheme string - -const ( - // URISchemeHTTP means that the scheme used will be http:// - URISchemeHTTP URIScheme = "HTTP" - // URISchemeHTTPS means that the scheme used will be https:// - URISchemeHTTPS URIScheme = "HTTPS" -) - -// TCPSocketAction describes an action based on opening a socket -type TCPSocketAction struct { - // Required: Port to connect to. - // +optional - Port intstr.IntOrString - // Optional: Host name to connect to, defaults to the pod IP. - // +optional - Host string -} - -// ExecAction describes a "run in container" action. -type ExecAction struct { - // Command is the command line to execute inside the container, the working directory for the - // command is root ('/') in the container's filesystem. The command is simply exec'd, it is - // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - // a shell, you need to explicitly call out to that shell. - // +optional - Command []string -} - -// Probe describes a health check to be performed against a container to determine whether it is -// alive or ready to receive traffic. -type Probe struct { - // The action taken to determine the health of a container - Handler - // Length of time before health checking is activated. In seconds. - // +optional - InitialDelaySeconds int32 - // Length of time before health checking times out. 
In seconds. - // +optional - TimeoutSeconds int32 - // How often (in seconds) to perform the probe. - // +optional - PeriodSeconds int32 - // Minimum consecutive successes for the probe to be considered successful after having failed. - // Must be 1 for liveness. - // +optional - SuccessThreshold int32 - // Minimum consecutive failures for the probe to be considered failed after having succeeded. - // +optional - FailureThreshold int32 -} - -// PullPolicy describes a policy for if/when to pull a container image -type PullPolicy string - -const ( - // PullAlways means that kubelet always attempts to pull the latest image. Container will fail If the pull fails. - PullAlways PullPolicy = "Always" - // PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present - PullNever PullPolicy = "Never" - // PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails. - PullIfNotPresent PullPolicy = "IfNotPresent" -) - -// TerminationMessagePolicy describes how termination messages are retrieved from a container. -type TerminationMessagePolicy string - -const ( - // TerminationMessageReadFile is the default behavior and will set the container status message to - // the contents of the container's terminationMessagePath when the container exits. - TerminationMessageReadFile TerminationMessagePolicy = "File" - // TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs - // for the container status message when the container exits with an error and the - // terminationMessagePath has no contents. - TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError" -) - -// Capability represent POSIX capabilities type -type Capability string - -// Capabilities represent POSIX capabilities that can be added or removed to a running container. -type Capabilities struct { - // Added capabilities - // +optional - Add []Capability - // Removed capabilities - // +optional - Drop []Capability -} - -// ResourceRequirements describes the compute resource requirements. -type ResourceRequirements struct { - // Limits describes the maximum amount of compute resources allowed. - // +optional - Limits ResourceList - // Requests describes the minimum amount of compute resources required. - // If Request is omitted for a container, it defaults to Limits if that is explicitly specified, - // otherwise to an implementation-defined value - // +optional - Requests ResourceList -} - -// Container represents a single container that is expected to be run on the host. -type Container struct { - // Required: This must be a DNS_LABEL. Each container in a pod must - // have a unique name. - Name string - // Required. - Image string - // Optional: The docker image's entrypoint is used if this is not provided; cannot be updated. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // +optional - Command []string - // Optional: The docker image's cmd is used if this is not provided; cannot be updated. - // Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // +optional - Args []string - // Optional: Defaults to Docker's default. - // +optional - WorkingDir string - // +optional - Ports []ContainerPort - // List of sources to populate environment variables in the container. - // The keys defined within a source must be a C_IDENTIFIER. All invalid keys - // will be reported as an event when the container is starting. When a key exists in multiple - // sources, the value associated with the last source will take precedence. - // Values defined by an Env with a duplicate key will take precedence. - // Cannot be updated. - // +optional - EnvFrom []EnvFromSource - // +optional - Env []EnvVar - // Compute resource requirements. - // +optional - Resources ResourceRequirements - // +optional - VolumeMounts []VolumeMount - // +optional - LivenessProbe *Probe - // +optional - ReadinessProbe *Probe - // +optional - Lifecycle *Lifecycle - // Required. - // +optional - TerminationMessagePath string - // +optional - TerminationMessagePolicy TerminationMessagePolicy - // Required: Policy for pulling images for this container - ImagePullPolicy PullPolicy - // Optional: SecurityContext defines the security options the container should be run with. - // If set, the fields of SecurityContext override the equivalent fields of PodSecurityContext. - // +optional - SecurityContext *SecurityContext - - // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) - // and shouldn't be used for general purpose containers. - // +optional - Stdin bool - // +optional - StdinOnce bool - // +optional - TTY bool -} - -// Handler defines a specific action that should be taken -// TODO: pass structured data to these actions, and document that data here. -type Handler struct { - // One and only one of the following should be specified. - // Exec specifies the action to take. - // +optional - Exec *ExecAction - // HTTPGet specifies the http request to perform. - // +optional - HTTPGet *HTTPGetAction - // TCPSocket specifies an action involving a TCP port. - // TODO: implement a realistic TCP lifecycle hook - // +optional - TCPSocket *TCPSocketAction -} - -// Lifecycle describes actions that the management system should take in response to container lifecycle -// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks -// until the action is complete, unless the container process fails, in which case the handler is aborted. -type Lifecycle struct { - // PostStart is called immediately after a container is created. If the handler fails, the container - // is terminated and restarted. - // +optional - PostStart *Handler - // PreStop is called immediately before a container is terminated. The reason for termination is - // passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. - // +optional - PreStop *Handler -} - -// The below types are used by kube_client and api_server. - -type ConditionStatus string - -// These are valid condition statuses. "ConditionTrue" means a resource is in the condition; -// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes -// can't decide if a resource is in the condition or not. 
In the future, we could add other -// intermediate conditions, e.g. ConditionDegraded. -const ( - ConditionTrue ConditionStatus = "True" - ConditionFalse ConditionStatus = "False" - ConditionUnknown ConditionStatus = "Unknown" -) - -type ContainerStateWaiting struct { - // A brief CamelCase string indicating details about why the container is in waiting state. - // +optional - Reason string - // A human-readable message indicating details about why the container is in waiting state. - // +optional - Message string -} - -type ContainerStateRunning struct { - // +optional - StartedAt metav1.Time -} - -type ContainerStateTerminated struct { - ExitCode int32 - // +optional - Signal int32 - // +optional - Reason string - // +optional - Message string - // +optional - StartedAt metav1.Time - // +optional - FinishedAt metav1.Time - // +optional - ContainerID string -} - -// ContainerState holds a possible state of container. -// Only one of its members may be specified. -// If none of them is specified, the default one is ContainerStateWaiting. -type ContainerState struct { - // +optional - Waiting *ContainerStateWaiting - // +optional - Running *ContainerStateRunning - // +optional - Terminated *ContainerStateTerminated -} - -type ContainerStatus struct { - // Each container in a pod must have a unique name. - Name string - // +optional - State ContainerState - // +optional - LastTerminationState ContainerState - // Ready specifies whether the container has passed its readiness check. - Ready bool - // Note that this is calculated from dead containers. But those containers are subject to - // garbage collection. This value will get capped at 5 by GC. - RestartCount int32 - Image string - ImageID string - // +optional - ContainerID string -} - -// PodPhase is a label for the condition of a pod at the current time. -type PodPhase string - -// These are the valid statuses of pods. -const ( - // PodPending means the pod has been accepted by the system, but one or more of the containers - // has not been started. This includes time before being bound to a node, as well as time spent - // pulling images onto the host. - PodPending PodPhase = "Pending" - // PodRunning means the pod has been bound to a node and all of the containers have been started. - // At least one container is still running or is in the process of being restarted. - PodRunning PodPhase = "Running" - // PodSucceeded means that all containers in the pod have voluntarily terminated - // with a container exit code of 0, and the system is not going to restart any of these containers. - PodSucceeded PodPhase = "Succeeded" - // PodFailed means that all containers in the pod have terminated, and at least one container has - // terminated in a failure (exited with a non-zero exit code or was stopped by the system). - PodFailed PodPhase = "Failed" - // PodUnknown means that for some reason the state of the pod could not be obtained, typically due - // to an error in communicating with the host of the pod. - PodUnknown PodPhase = "Unknown" -) - -type PodConditionType string - -// These are valid conditions of pod. -const ( - // PodScheduled represents status of the scheduling process for this pod. - PodScheduled PodConditionType = "PodScheduled" - // PodReady means the pod is able to service requests and should be added to the - // load balancing pools of all matching services. - PodReady PodConditionType = "Ready" - // PodInitialized means that all init containers in the pod have started successfully. 
- PodInitialized PodConditionType = "Initialized" - // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler - // can't schedule the pod right now, for example due to insufficient resources in the cluster. - PodReasonUnschedulable = "Unschedulable" -) - -type PodCondition struct { - Type PodConditionType - Status ConditionStatus - // +optional - LastProbeTime metav1.Time - // +optional - LastTransitionTime metav1.Time - // +optional - Reason string - // +optional - Message string -} - -// RestartPolicy describes how the container should be restarted. -// Only one of the following restart policies may be specified. -// If none of the following policies is specified, the default one -// is RestartPolicyAlways. -type RestartPolicy string - -const ( - RestartPolicyAlways RestartPolicy = "Always" - RestartPolicyOnFailure RestartPolicy = "OnFailure" - RestartPolicyNever RestartPolicy = "Never" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodList is a list of Pods. -type PodList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Pod -} - -// DNSPolicy defines how a pod's DNS will be configured. -type DNSPolicy string - -const ( - // DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS - // first, if it is available, then fall back on the default - // (as determined by kubelet) DNS settings. - DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet" - - // DNSClusterFirst indicates that the pod should use cluster DNS - // first unless hostNetwork is true, if it is available, then - // fall back on the default (as determined by kubelet) DNS settings. - DNSClusterFirst DNSPolicy = "ClusterFirst" - - // DNSDefault indicates that the pod should use the default (as - // determined by kubelet) DNS settings. - DNSDefault DNSPolicy = "Default" -) - -// A node selector represents the union of the results of one or more label queries -// over a set of nodes; that is, it represents the OR of the selectors represented -// by the node selector terms. -type NodeSelector struct { - //Required. A list of node selector terms. The terms are ORed. - NodeSelectorTerms []NodeSelectorTerm -} - -// A null or empty node selector term matches no objects. -type NodeSelectorTerm struct { - //Required. A list of node selector requirements. The requirements are ANDed. - MatchExpressions []NodeSelectorRequirement -} - -// A node selector requirement is a selector that contains values, a key, and an operator -// that relates the key and values. -type NodeSelectorRequirement struct { - // The label key that the selector applies to. - Key string - // Represents a key's relationship to a set of values. - // Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - Operator NodeSelectorOperator - // An array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. If the operator is Gt or Lt, the values - // array must have a single element, which will be interpreted as an integer. - // This array is replaced during a strategic merge patch. - // +optional - Values []string -} - -// A node selector operator is the set of operators that can be used in -// a node selector requirement. 
-type NodeSelectorOperator string - -const ( - NodeSelectorOpIn NodeSelectorOperator = "In" - NodeSelectorOpNotIn NodeSelectorOperator = "NotIn" - NodeSelectorOpExists NodeSelectorOperator = "Exists" - NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist" - NodeSelectorOpGt NodeSelectorOperator = "Gt" - NodeSelectorOpLt NodeSelectorOperator = "Lt" -) - -// Affinity is a group of affinity scheduling rules. -type Affinity struct { - // Describes node affinity scheduling rules for the pod. - // +optional - NodeAffinity *NodeAffinity - // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). - // +optional - PodAffinity *PodAffinity - // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). - // +optional - PodAntiAffinity *PodAntiAffinity -} - -// Pod affinity is a group of inter pod affinity scheduling rules. -type PodAffinity struct { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system will try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm - - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system may or may not try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm - // The scheduler will prefer to schedule pods to nodes that satisfy - // the affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - // node(s) with the highest sum are the most preferred. - // +optional - PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm -} - -// Pod anti affinity is a group of inter pod anti affinity scheduling rules. -type PodAntiAffinity struct { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the anti-affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the anti-affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. 
due to a pod label update), the - // system will try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm - - // If the anti-affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the anti-affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system may or may not try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm - // The scheduler will prefer to schedule pods to nodes that satisfy - // the anti-affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling anti-affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - // node(s) with the highest sum are the most preferred. - // +optional - PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm -} - -// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) -type WeightedPodAffinityTerm struct { - // weight associated with matching the corresponding podAffinityTerm, - // in the range 1-100. - Weight int32 - // Required. A pod affinity term, associated with the corresponding weight. - PodAffinityTerm PodAffinityTerm -} - -// Defines a set of pods (namely those matching the labelSelector -// relative to the given namespace(s)) that this pod should be -// co-located (affinity) or not co-located (anti-affinity) with, -// where co-located is defined as running on a node whose value of -// the label with key matches that of any node on which -// a pod of the set of pods is running. -type PodAffinityTerm struct { - // A label query over a set of resources, in this case pods. - // +optional - LabelSelector *metav1.LabelSelector - // namespaces specifies which namespaces the labelSelector applies to (matches against); - // null or empty list means "this pod's namespace" - Namespaces []string - // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - // the labelSelector in the specified namespaces, where co-located is defined as running on a node - // whose value of the label with key topologyKey matches that of any node on which any of the - // selected pods is running. - // Empty topologyKey is not allowed. - // +optional - TopologyKey string -} - -// Node affinity is a group of node affinity scheduling rules. -type NodeAffinity struct { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. 
- // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to an update), the system - // will try to eventually evict the pod from its node. - // +optional - // RequiredDuringSchedulingRequiredDuringExecution *NodeSelector - - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to an update), the system - // may or may not try to eventually evict the pod from its node. - // +optional - RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector - // The scheduler will prefer to schedule pods to nodes that satisfy - // the affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node matches the corresponding matchExpressions; the - // node(s) with the highest sum are the most preferred. - // +optional - PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm -} - -// An empty preferred scheduling term matches all objects with implicit weight 0 -// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). -type PreferredSchedulingTerm struct { - // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - Weight int32 - // A node selector term, associated with the corresponding weight. - Preference NodeSelectorTerm -} - -// The node this Taint is attached to has the "effect" on -// any pod that does not tolerate the Taint. -type Taint struct { - // Required. The taint key to be applied to a node. - Key string - // Required. The taint value corresponding to the taint key. - // +optional - Value string - // Required. The effect of the taint on pods - // that do not tolerate the taint. - // Valid effects are NoSchedule, PreferNoSchedule and NoExecute. - Effect TaintEffect - // TimeAdded represents the time at which the taint was added. - // It is only written for NoExecute taints. - // +optional - TimeAdded metav1.Time -} - -type TaintEffect string - -const ( - // Do not allow new pods to schedule onto the node unless they tolerate the taint, - // but allow all pods submitted to Kubelet without going through the scheduler - // to start, and allow all already-running pods to continue running. - // Enforced by the scheduler. - TaintEffectNoSchedule TaintEffect = "NoSchedule" - // Like TaintEffectNoSchedule, but the scheduler tries not to schedule - // new pods onto the node, rather than prohibiting new pods from scheduling - // onto the node entirely. Enforced by the scheduler. - TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule" - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // Like TaintEffectNoSchedule, but additionally do not allow pods submitted to - // Kubelet without going through the scheduler to start. - // Enforced by Kubelet and the scheduler. - // TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit" - - // Evict any already-running pods that do not tolerate the taint. 
- // Currently enforced by NodeController.
- TaintEffectNoExecute TaintEffect = "NoExecute"
-)
-
-// The pod this Toleration is attached to tolerates any taint that matches
-// the triple <key,value,effect> using the matching operator <operator>.
-type Toleration struct {
- // Key is the taint key that the toleration applies to. Empty means match all taint keys.
- // If the key is empty, operator must be Exists; this combination means to match all values and all keys.
- // +optional
- Key string
- // Operator represents a key's relationship to the value.
- // Valid operators are Exists and Equal. Defaults to Equal.
- // Exists is equivalent to wildcard for value, so that a pod can
- // tolerate all taints of a particular category.
- // +optional
- Operator TolerationOperator
- // Value is the taint value the toleration matches to.
- // If the operator is Exists, the value should be empty, otherwise just a regular string.
- // +optional
- Value string
- // Effect indicates the taint effect to match. Empty means match all taint effects.
- // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
- // +optional
- Effect TaintEffect
- // TolerationSeconds represents the period of time the toleration (which must be
- // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
- // it is not set, which means tolerate the taint forever (do not evict). Zero and
- // negative values will be treated as 0 (evict immediately) by the system.
- // +optional
- TolerationSeconds *int64
-}
-
-// A toleration operator is the set of operators that can be used in a toleration.
-type TolerationOperator string
-
-const (
- TolerationOpExists TolerationOperator = "Exists"
- TolerationOpEqual TolerationOperator = "Equal"
-)
-
-// PodSpec is a description of a pod
-type PodSpec struct {
- Volumes []Volume
- // List of initialization containers belonging to the pod.
- InitContainers []Container
- // List of containers belonging to the pod.
- Containers []Container
- // +optional
- RestartPolicy RestartPolicy
- // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
- // Value must be non-negative integer. The value zero indicates delete immediately.
- // If this value is nil, the default grace period will be used instead.
- // The grace period is the duration in seconds after the processes running in the pod are sent
- // a termination signal and the time when the processes are forcibly halted with a kill signal.
- // Set this value longer than the expected cleanup time for your process.
- // +optional
- TerminationGracePeriodSeconds *int64
- // Optional duration in seconds relative to the StartTime that the pod may be active on a node
- // before the system actively tries to terminate the pod; value must be positive integer
- // +optional
- ActiveDeadlineSeconds *int64
- // Required: Set DNS policy.
- // +optional
- DNSPolicy DNSPolicy
- // NodeSelector is a selector which must be true for the pod to fit on a node
- // +optional
- NodeSelector map[string]string
-
- // ServiceAccountName is the name of the ServiceAccount to use to run this pod
- // The pod will be allowed to use secrets referenced by the ServiceAccount
- ServiceAccountName string
- // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
- // +optional
- AutomountServiceAccountToken *bool
-
- // NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
- // the scheduler simply schedules this pod onto that node, assuming that it fits resource
- // requirements.
- // +optional
- NodeName string
- // SecurityContext holds pod-level security attributes and common container settings.
- // Optional: Defaults to empty. See type description for default values of each field.
- // +optional
- SecurityContext *PodSecurityContext
- // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec.
- // If specified, these secrets will be passed to individual puller implementations for them to use. For example,
- // in the case of docker, only DockerConfig type secrets are honored.
- // +optional
- ImagePullSecrets []LocalObjectReference
- // Specifies the hostname of the Pod.
- // If not specified, the pod's hostname will be set to a system-defined value.
- // +optional
- Hostname string
- // If specified, the fully qualified Pod hostname will be "<hostname>.<subdomain>.<pod namespace>.svc.<cluster domain>".
- // If not specified, the pod will not have a domainname at all.
- // +optional
- Subdomain string
- // If specified, the pod's scheduling constraints
- // +optional
- Affinity *Affinity
- // If specified, the pod will be dispatched by specified scheduler.
- // If not specified, the pod will be dispatched by default scheduler.
- // +optional
- SchedulerName string
- // If specified, the pod's tolerations.
- // +optional
- Tolerations []Toleration
- // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts
- // file if specified. This is only valid for non-hostNetwork pods.
- // +optional
- HostAliases []HostAlias
- // If specified, indicates the pod's priority. "SYSTEM" is a special keyword
- // which indicates the highest priority. Any other name must be defined by
- // creating a PriorityClass object with that name.
- // If not specified, the pod priority will be default or zero if there is no
- // default.
- // +optional
- PriorityClassName string
- // The priority value. Various system components use this field to find the
- // priority of the pod. When Priority Admission Controller is enabled, it
- // prevents users from setting this field. The admission controller populates
- // this field from PriorityClassName.
- // The higher the value, the higher the priority.
- // +optional
- Priority *int32
-}
-
-// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
-// pod's hosts file.
-type HostAlias struct {
- IP string
- Hostnames []string
-}
-
-// Sysctl defines a kernel parameter to be set
-type Sysctl struct {
- // Name of a property to set
- Name string
- // Value of a property to set
- Value string
-}
-
-// PodSecurityContext holds pod-level security attributes and common container settings.
-// Some fields are also present in container.securityContext. Field values of
-// container.securityContext take precedence over field values of PodSecurityContext.
-type PodSecurityContext struct {
- // Use the host's network namespace. If this option is set, the ports that will be
- // used must be specified.
- // Optional: Default to false
- // +k8s:conversion-gen=false
- // +optional
- HostNetwork bool
- // Use the host's pid namespace.
- // Optional: Default to false.
- // +k8s:conversion-gen=false
- // +optional
- HostPID bool
- // Use the host's ipc namespace.
- // Optional: Default to false.
- // +k8s:conversion-gen=false
- // +optional
- HostIPC bool
- // The SELinux context to be applied to all containers.
- // If unspecified, the container runtime will allocate a random SELinux context for each - // container. May also be set in SecurityContext. If set in - // both SecurityContext and PodSecurityContext, the value specified in SecurityContext - // takes precedence for that container. - // +optional - SELinuxOptions *SELinuxOptions - // The UID to run the entrypoint of the container process. - // Defaults to user specified in image metadata if unspecified. - // May also be set in SecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence - // for that container. - // +optional - RunAsUser *int64 - // Indicates that the container must run as a non-root user. - // If true, the Kubelet will validate the image at runtime to ensure that it - // does not run as UID 0 (root) and fail to start the container if it does. - // If unset or false, no such validation will be performed. - // May also be set in SecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - RunAsNonRoot *bool - // A list of groups applied to the first process run in each container, in addition - // to the container's primary GID. If unspecified, no groups will be added to - // any container. - // +optional - SupplementalGroups []int64 - // A special supplemental group that applies to all containers in a pod. - // Some volume types allow the Kubelet to change the ownership of that volume - // to be owned by the pod: - // - // 1. The owning GID will be the FSGroup - // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) - // 3. The permission bits are OR'd with rw-rw---- - // - // If unset, the Kubelet will not modify the ownership and permissions of any volume. - // +optional - FSGroup *int64 -} - -// PodQOSClass defines the supported qos classes of Pods. -type PodQOSClass string - -const ( - // PodQOSGuaranteed is the Guaranteed qos class. - PodQOSGuaranteed PodQOSClass = "Guaranteed" - // PodQOSBurstable is the Burstable qos class. - PodQOSBurstable PodQOSClass = "Burstable" - // PodQOSBestEffort is the BestEffort qos class. - PodQOSBestEffort PodQOSClass = "BestEffort" -) - -// PodStatus represents information about the status of a pod. Status may trail the actual -// state of a system. -type PodStatus struct { - // +optional - Phase PodPhase - // +optional - Conditions []PodCondition - // A human readable message indicating details about why the pod is in this state. - // +optional - Message string - // A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted' - // +optional - Reason string - - // +optional - HostIP string - // +optional - PodIP string - - // Date and time at which the object was acknowledged by the Kubelet. - // This is before the Kubelet pulled the container image(s) for the pod. - // +optional - StartTime *metav1.Time - // +optional - QOSClass PodQOSClass - - // The list has one entry per init container in the manifest. The most recent successful - // init container will have ready = true, the most recently started container will have - // startTime set. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-and-container-status - InitContainerStatuses []ContainerStatus - // The list has one entry per container in the manifest. Each entry is - // currently the output of `docker inspect`. 
This output format is *not* - // final and should not be relied upon. - // TODO: Make real decisions about what our info should look like. Re-enable fuzz test - // when we have done this. - // +optional - ContainerStatuses []ContainerStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded -type PodStatusResult struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - // Status represents the current information about a pod. This data may not be up - // to date. - // +optional - Status PodStatus -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Pod is a collection of containers, used as either input (create, update) or as output (list, get). -type Pod struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of a pod. - // +optional - Spec PodSpec - - // Status represents the current information about a pod. This data may not be up - // to date. - // +optional - Status PodStatus -} - -// PodTemplateSpec describes the data a pod should have when created from a template -type PodTemplateSpec struct { - // Metadata of the pods created from this template. - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of a pod. - // +optional - Spec PodSpec -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodTemplate describes a template for creating copies of a predefined pod. -type PodTemplate struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Template defines the pods that will be created from this pod template - // +optional - Template PodTemplateSpec -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodTemplateList is a list of PodTemplates. -type PodTemplateList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []PodTemplate -} - -// ReplicationControllerSpec is the specification of a replication controller. -// As the internal representation of a replication controller, it may have either -// a TemplateRef or a Template set. -type ReplicationControllerSpec struct { - // Replicas is the number of desired replicas. - Replicas int32 - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - MinReadySeconds int32 - - // Selector is a label query over pods that should match the Replicas count. - Selector map[string]string - - // TemplateRef is a reference to an object that describes the pod that will be created if - // insufficient replicas are detected. This reference is ignored if a Template is set. - // Must be set before converting to a versioned API object - // +optional - //TemplateRef *ObjectReference - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. Internally, this takes precedence over a - // TemplateRef. - // +optional - Template *PodTemplateSpec -} - -// ReplicationControllerStatus represents the current status of a replication -// controller. -type ReplicationControllerStatus struct { - // Replicas is the number of actual replicas. - Replicas int32 - - // The number of pods that have labels matching the labels of the pod template of the replication controller. 
- // +optional - FullyLabeledReplicas int32 - - // The number of ready replicas for this replication controller. - // +optional - ReadyReplicas int32 - - // The number of available replicas (ready for at least minReadySeconds) for this replication controller. - // +optional - AvailableReplicas int32 - - // ObservedGeneration is the most recent generation observed by the controller. - // +optional - ObservedGeneration int64 - - // Represents the latest available observations of a replication controller's current state. - // +optional - Conditions []ReplicationControllerCondition -} - -type ReplicationControllerConditionType string - -// These are valid conditions of a replication controller. -const ( - // ReplicationControllerReplicaFailure is added in a replication controller when one of its pods - // fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors, - // etc. or deleted due to kubelet being down or finalizers are failing. - ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure" -) - -// ReplicationControllerCondition describes the state of a replication controller at a certain point. -type ReplicationControllerCondition struct { - // Type of replication controller condition. - Type ReplicationControllerConditionType - // Status of the condition, one of True, False, Unknown. - Status ConditionStatus - // The last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time - // The reason for the condition's last transition. - // +optional - Reason string - // A human readable message indicating details about the transition. - // +optional - Message string -} - -// +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/extensions.Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/extensions.Scale,result=k8s.io/kubernetes/pkg/apis/extensions.Scale -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ReplicationController represents the configuration of a replication controller. -type ReplicationController struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the desired behavior of this replication controller. - // +optional - Spec ReplicationControllerSpec - - // Status is the current status of this replication controller. This data may be - // out of date by some window of time. - // +optional - Status ReplicationControllerStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ReplicationControllerList is a collection of replication controllers. -type ReplicationControllerList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []ReplicationController -} - -const ( - // ClusterIPNone - do not assign a cluster IP - // no proxying required and no environment variables should be created for pods - ClusterIPNone = "None" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceList holds a list of services. -type ServiceList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Service -} - -// Session Affinity Type string -type ServiceAffinity string - -const ( - // ServiceAffinityClientIP is the Client IP based. - ServiceAffinityClientIP ServiceAffinity = "ClientIP" - - // ServiceAffinityNone - no session affinity. 
- ServiceAffinityNone ServiceAffinity = "None" -) - -const ( - // DefaultClientIPServiceAffinitySeconds is the default timeout seconds - // of Client IP based session affinity - 3 hours. - DefaultClientIPServiceAffinitySeconds int32 = 10800 - // MaxClientIPServiceAffinitySeconds is the max timeout seconds - // of Client IP based session affinity - 1 day. - MaxClientIPServiceAffinitySeconds int32 = 86400 -) - -// SessionAffinityConfig represents the configurations of session affinity. -type SessionAffinityConfig struct { - // clientIP contains the configurations of Client IP based session affinity. - // +optional - ClientIP *ClientIPConfig -} - -// ClientIPConfig represents the configurations of Client IP based session affinity. -type ClientIPConfig struct { - // timeoutSeconds specifies the seconds of ClientIP type session sticky time. - // The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". - // Default value is 10800(for 3 hours). - // +optional - TimeoutSeconds *int32 -} - -// Service Type string describes ingress methods for a service -type ServiceType string - -const ( - // ServiceTypeClusterIP means a service will only be accessible inside the - // cluster, via the ClusterIP. - ServiceTypeClusterIP ServiceType = "ClusterIP" - - // ServiceTypeNodePort means a service will be exposed on one port of - // every node, in addition to 'ClusterIP' type. - ServiceTypeNodePort ServiceType = "NodePort" - - // ServiceTypeLoadBalancer means a service will be exposed via an - // external load balancer (if the cloud provider supports it), in addition - // to 'NodePort' type. - ServiceTypeLoadBalancer ServiceType = "LoadBalancer" - - // ServiceTypeExternalName means a service consists of only a reference to - // an external name that kubedns or equivalent will return as a CNAME - // record, with no exposing or proxying of any pods involved. - ServiceTypeExternalName ServiceType = "ExternalName" -) - -// Service External Traffic Policy Type string -type ServiceExternalTrafficPolicyType string - -const ( - // ServiceExternalTrafficPolicyTypeLocal specifies node-local endpoints behavior. - ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local" - // ServiceExternalTrafficPolicyTypeCluster specifies cluster-wide (legacy) behavior. - ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster" -) - -// ServiceStatus represents the current status of a service -type ServiceStatus struct { - // LoadBalancer contains the current status of the load-balancer, - // if one is present. - // +optional - LoadBalancer LoadBalancerStatus -} - -// LoadBalancerStatus represents the status of a load-balancer -type LoadBalancerStatus struct { - // Ingress is a list containing ingress points for the load-balancer; - // traffic intended for the service should be sent to these ingress points. - // +optional - Ingress []LoadBalancerIngress -} - -// LoadBalancerIngress represents the status of a load-balancer ingress point: -// traffic intended for the service should be sent to an ingress point. 
-type LoadBalancerIngress struct { - // IP is set for load-balancer ingress points that are IP based - // (typically GCE or OpenStack load-balancers) - // +optional - IP string - - // Hostname is set for load-balancer ingress points that are DNS based - // (typically AWS load-balancers) - // +optional - Hostname string -} - -// ServiceSpec describes the attributes that a user creates on a service -type ServiceSpec struct { - // Type determines how the Service is exposed. Defaults to ClusterIP. Valid - // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. - // "ExternalName" maps to the specified externalName. - // "ClusterIP" allocates a cluster-internal IP address for load-balancing to - // endpoints. Endpoints are determined by the selector or if that is not - // specified, by manual construction of an Endpoints object. If clusterIP is - // "None", no virtual IP is allocated and the endpoints are published as a - // set of endpoints rather than a stable IP. - // "NodePort" builds on ClusterIP and allocates a port on every node which - // routes to the clusterIP. - // "LoadBalancer" builds on NodePort and creates an - // external load-balancer (if supported in the current cloud) which routes - // to the clusterIP. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/ - // +optional - Type ServiceType - - // Required: The list of ports that are exposed by this service. - Ports []ServicePort - - // Route service traffic to pods with label keys and values matching this - // selector. If empty or not present, the service is assumed to have an - // external process managing its endpoints, which Kubernetes will not - // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. - // Ignored if type is ExternalName. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/ - Selector map[string]string - - // ClusterIP is the IP address of the service and is usually assigned - // randomly by the master. If an address is specified manually and is not in - // use by others, it will be allocated to the service; otherwise, creation - // of the service will fail. This field can not be changed through updates. - // Valid values are "None", empty string (""), or a valid IP address. "None" - // can be specified for headless services when proxying is not required. - // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if - // type is ExternalName. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - // +optional - ClusterIP string - - // ExternalName is the external reference that kubedns or equivalent will - // return as a CNAME record for this service. No proxying will be involved. - // Must be a valid DNS name and requires Type to be ExternalName. - ExternalName string - - // ExternalIPs are used by external load balancers, or can be set by - // users to handle external traffic that arrives at a node. - // +optional - ExternalIPs []string - - // Only applies to Service Type: LoadBalancer - // LoadBalancer will get created with the IP specified in this field. - // This feature depends on whether the underlying cloud-provider supports specifying - // the loadBalancerIP when a load balancer is created. - // This field will be ignored if the cloud-provider does not support the feature. - // +optional - LoadBalancerIP string - - // Optional: Supports "ClientIP" and "None". Used to maintain session affinity. 
- // +optional - SessionAffinity ServiceAffinity - - // sessionAffinityConfig contains the configurations of session affinity. - // +optional - SessionAffinityConfig *SessionAffinityConfig - - // Optional: If specified and supported by the platform, this will restrict traffic through the cloud-provider - // load-balancer will be restricted to the specified client IPs. This field will be ignored if the - // cloud-provider does not support the feature." - // +optional - LoadBalancerSourceRanges []string - - // externalTrafficPolicy denotes if this Service desires to route external - // traffic to node-local or cluster-wide endpoints. "Local" preserves the - // client source IP and avoids a second hop for LoadBalancer and Nodeport - // type services, but risks potentially imbalanced traffic spreading. - // "Cluster" obscures the client source IP and may cause a second hop to - // another node, but should have good overall load-spreading. - // +optional - ExternalTrafficPolicy ServiceExternalTrafficPolicyType - - // healthCheckNodePort specifies the healthcheck nodePort for the service. - // If not specified, HealthCheckNodePort is created by the service api - // backend with the allocated nodePort. Will use user-specified nodePort value - // if specified by the client. Only effects when Type is set to LoadBalancer - // and ExternalTrafficPolicy is set to Local. - // +optional - HealthCheckNodePort int32 - - // publishNotReadyAddresses, when set to true, indicates that DNS implementations - // must publish the notReadyAddresses of subsets for the Endpoints associated with - // the Service. The default value is false. - // The primary use case for setting this field is to use a StatefulSet's Headless Service - // to propagate SRV records for its Pods without respect to their readiness for purpose - // of peer discovery. - // This field will replace the service.alpha.kubernetes.io/tolerate-unready-endpoints - // when that annotation is deprecated and all clients have been converted to use this - // field. - // +optional - PublishNotReadyAddresses bool -} - -type ServicePort struct { - // Optional if only one ServicePort is defined on this service: The - // name of this port within the service. This must be a DNS_LABEL. - // All ports within a ServiceSpec must have unique names. This maps to - // the 'Name' field in EndpointPort objects. - Name string - - // The IP protocol for this port. Supports "TCP" and "UDP". - Protocol Protocol - - // The port that will be exposed on the service. - Port int32 - - // Optional: The target port on pods selected by this service. If this - // is a string, it will be looked up as a named port in the target - // Pod's container ports. If this is not specified, the value - // of the 'port' field is used (an identity map). - // This field is ignored for services with clusterIP=None, and should be - // omitted or set equal to the 'port' field. - TargetPort intstr.IntOrString - - // The port on each node on which this service is exposed. - // Default is to auto-allocate a port if the ServiceType of this Service requires one. - NodePort int32 -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Service is a named abstraction of software service (for example, mysql) consisting of local port -// (for example 3306) that the proxy listens on, and the selector that determines which pods -// will answer requests sent through the proxy. 
-type Service struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of a service. - // +optional - Spec ServiceSpec - - // Status represents the current status of a service. - // +optional - Status ServiceStatus -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceAccount binds together: -// * a name, understood by users, and perhaps by peripheral systems, for an identity -// * a principal that can be authenticated and authorized -// * a set of secrets -type ServiceAccount struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount - Secrets []ObjectReference - - // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images - // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets - // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. - // +optional - ImagePullSecrets []LocalObjectReference - - // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. - // Can be overridden at the pod level. - // +optional - AutomountServiceAccountToken *bool -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceAccountList is a list of ServiceAccount objects -type ServiceAccountList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []ServiceAccount -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Endpoints is a collection of endpoints that implement the actual service. Example: -// Name: "mysvc", -// Subsets: [ -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// }, -// { -// Addresses: [{"ip": "10.10.3.3"}], -// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] -// }, -// ] -type Endpoints struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // The set of all endpoints is the union of all subsets. - Subsets []EndpointSubset -} - -// EndpointSubset is a group of addresses with a common set of ports. The -// expanded set of endpoints is the Cartesian product of Addresses x Ports. -// For example, given: -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// } -// The resulting set of endpoints can be viewed as: -// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], -// b: [ 10.10.1.1:309, 10.10.2.2:309 ] -type EndpointSubset struct { - Addresses []EndpointAddress - NotReadyAddresses []EndpointAddress - Ports []EndpointPort -} - -// EndpointAddress is a tuple that describes single IP address. -type EndpointAddress struct { - // The IP of this endpoint. - // IPv6 is also accepted but not fully supported on all platforms. Also, certain - // kubernetes components, like kube-proxy, are not IPv6 ready. - // TODO: This should allow hostname or IP, see #4447. - IP string - // Optional: Hostname of this endpoint - // Meant to be used by DNS servers etc. - // +optional - Hostname string - // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node. - // +optional - NodeName *string - // Optional: The kubernetes object related to the entry point. 
- TargetRef *ObjectReference
-}
-
-// EndpointPort is a tuple that describes a single port.
-type EndpointPort struct {
- // The name of this port (corresponds to ServicePort.Name). Optional
- // if only one port is defined. Must be a DNS_LABEL.
- Name string
-
- // The port number.
- Port int32
-
- // The IP protocol for this port.
- Protocol Protocol
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// EndpointsList is a list of endpoints.
-type EndpointsList struct {
- metav1.TypeMeta
- // +optional
- metav1.ListMeta
-
- Items []Endpoints
-}
-
-// NodeSpec describes the attributes that a node is created with.
-type NodeSpec struct {
- // PodCIDR represents the pod IP range assigned to the node
- // Note: assigning IP ranges to nodes might need to be revisited when we support migratable IPs.
- // +optional
- PodCIDR string
-
- // External ID of the node assigned by some machine database (e.g. a cloud provider)
- // +optional
- ExternalID string
-
- // ID of the node assigned by the cloud provider
- // Note: format is "<ProviderName>://<ProviderSpecificNodeID>"
- // +optional
- ProviderID string
-
- // Unschedulable controls node schedulability of new pods. By default node is schedulable.
- // +optional
- Unschedulable bool
-
- // If specified, the node's taints.
- // +optional
- Taints []Taint
-
- // If specified, the source to get node configuration from
- // The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field
- // +optional
- ConfigSource *NodeConfigSource
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// NodeConfigSource specifies a source of node configuration. Exactly one subfield must be non-nil.
-type NodeConfigSource struct {
- metav1.TypeMeta
- ConfigMapRef *ObjectReference
-}
-
-// DaemonEndpoint contains information about a single Daemon endpoint.
-type DaemonEndpoint struct {
- /*
- The port tag was not properly in quotes in earlier releases, so it must be
- uppercased for backwards compat (since it was falling back to var name of
- 'Port').
- */
-
- // Port number of the given endpoint.
- Port int32
-}
-
-// NodeDaemonEndpoints lists ports opened by daemons running on the Node.
-type NodeDaemonEndpoints struct {
- // Endpoint on which Kubelet is listening.
- // +optional
- KubeletEndpoint DaemonEndpoint
-}
-
-// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
-type NodeSystemInfo struct {
- // MachineID reported by the node. For unique machine identification
- // in the cluster this field is preferred. Learn more from man(5)
- // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
- MachineID string
- // SystemUUID reported by the node. For unique machine identification
- // MachineID is preferred. This field is specific to Red Hat hosts
- // https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html
- SystemUUID string
- // Boot ID reported by the node.
- BootID string
- // Kernel Version reported by the node.
- KernelVersion string
- // OS Image reported by the node.
- OSImage string
- // ContainerRuntime Version reported by the node.
- ContainerRuntimeVersion string
- // Kubelet Version reported by the node.
- KubeletVersion string
- // KubeProxy Version reported by the node.
- KubeProxyVersion string
- // The Operating System reported by the node
- OperatingSystem string
- // The Architecture reported by the node
- Architecture string
-}
-
-// NodeStatus is information about the current status of a node.
-type NodeStatus struct { - // Capacity represents the total resources of a node. - // +optional - Capacity ResourceList - // Allocatable represents the resources of a node that are available for scheduling. - // +optional - Allocatable ResourceList - // NodePhase is the current lifecycle phase of the node. - // +optional - Phase NodePhase - // Conditions is an array of current node conditions. - // +optional - Conditions []NodeCondition - // Queried from cloud provider, if available. - // +optional - Addresses []NodeAddress - // Endpoints of daemons running on the Node. - // +optional - DaemonEndpoints NodeDaemonEndpoints - // Set of ids/uuids to uniquely identify the node. - // +optional - NodeInfo NodeSystemInfo - // List of container images on this node - // +optional - Images []ContainerImage - // List of attachable volumes in use (mounted) by the node. - // +optional - VolumesInUse []UniqueVolumeName - // List of volumes that are attached to the node. - // +optional - VolumesAttached []AttachedVolume -} - -type UniqueVolumeName string - -// AttachedVolume describes a volume attached to a node -type AttachedVolume struct { - // Name of the attached volume - Name UniqueVolumeName - - // DevicePath represents the device path where the volume should be available - DevicePath string -} - -// AvoidPods describes pods that should avoid this node. This is the value for a -// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and -// will eventually become a field of NodeStatus. -type AvoidPods struct { - // Bounded-sized list of signatures of pods that should avoid this node, sorted - // in timestamp order from oldest to newest. Size of the slice is unspecified. - // +optional - PreferAvoidPods []PreferAvoidPodsEntry -} - -// Describes a class of pods that should avoid this node. -type PreferAvoidPodsEntry struct { - // The class of pods. - PodSignature PodSignature - // Time at which this entry was added to the list. - // +optional - EvictionTime metav1.Time - // (brief) reason why this entry was added to the list. - // +optional - Reason string - // Human readable message indicating why this entry was added to the list. - // +optional - Message string -} - -// Describes the class of pods that should avoid this node. -// Exactly one field should be set. -type PodSignature struct { - // Reference to controller whose pods should avoid this node. - // +optional - PodController *metav1.OwnerReference -} - -// Describe a container image -type ContainerImage struct { - // Names by which this image is known. - Names []string - // The size of the image in bytes. - // +optional - SizeBytes int64 -} - -type NodePhase string - -// These are the valid phases of node. -const ( - // NodePending means the node has been created/added by the system, but not configured. - NodePending NodePhase = "Pending" - // NodeRunning means the node has been configured and has Kubernetes components running. - NodeRunning NodePhase = "Running" - // NodeTerminated means the node has been removed from the cluster. - NodeTerminated NodePhase = "Terminated" -) - -type NodeConditionType string - -// These are valid conditions of node. Currently, we don't have enough information to decide -// node condition. In the future, we will add more. The proposed set of conditions are: -// NodeReady, NodeReachable -const ( - // NodeReady means kubelet is healthy and ready to accept pods. 
- NodeReady NodeConditionType = "Ready" - // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk - // space on the node. - NodeOutOfDisk NodeConditionType = "OutOfDisk" - // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory. - NodeMemoryPressure NodeConditionType = "MemoryPressure" - // NodeDiskPressure means the kubelet is under pressure due to insufficient available disk. - NodeDiskPressure NodeConditionType = "DiskPressure" - // NodeNetworkUnavailable means that network for the node is not correctly configured. - NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable" - // NodeConfigOK indicates whether the kubelet is correctly configured - NodeConfigOK NodeConditionType = "ConfigOK" -) - -type NodeCondition struct { - Type NodeConditionType - Status ConditionStatus - // +optional - LastHeartbeatTime metav1.Time - // +optional - LastTransitionTime metav1.Time - // +optional - Reason string - // +optional - Message string -} - -type NodeAddressType string - -const ( - NodeHostName NodeAddressType = "Hostname" - NodeExternalIP NodeAddressType = "ExternalIP" - NodeInternalIP NodeAddressType = "InternalIP" - NodeExternalDNS NodeAddressType = "ExternalDNS" - NodeInternalDNS NodeAddressType = "InternalDNS" -) - -type NodeAddress struct { - Type NodeAddressType - Address string -} - -// NodeResources is an object for conveying resource information about a node. -// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details. -type NodeResources struct { - // Capacity represents the available resources of a node - // +optional - Capacity ResourceList -} - -// ResourceName is the name identifying various resources in a ResourceList. -type ResourceName string - -// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters, -// with the -, _, and . characters allowed anywhere, except the first or last character. -// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than -// camel case, separating compound words. -// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name. -const ( - // CPU, in cores. (500m = .5 cores) - ResourceCPU ResourceName = "cpu" - // Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceMemory ResourceName = "memory" - // Volume size, in bytes (e,g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024) - ResourceStorage ResourceName = "storage" - // Local ephemeral storage, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - // The resource name for ResourceEphemeralStorage is alpha and it can change across releases. - ResourceEphemeralStorage ResourceName = "ephemeral-storage" - // NVIDIA GPU, in devices. Alpha, might change: although fractional and allowing values >1, only one whole device per node is assigned. - ResourceNvidiaGPU ResourceName = "alpha.kubernetes.io/nvidia-gpu" -) - -const ( - // Namespace prefix for opaque counted resources (alpha). - ResourceOpaqueIntPrefix = "pod.alpha.kubernetes.io/opaque-int-resource-" - // Default namespace prefix. - ResourceDefaultNamespacePrefix = "kubernetes.io/" - // Name prefix for huge page resources (alpha). - ResourceHugePagesPrefix = "hugepages-" -) - -// ResourceList is a set of (resource name, quantity) pairs. 
-type ResourceList map[ResourceName]resource.Quantity - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Node is a worker node in Kubernetes -// The name of the node according to etcd is in ObjectMeta.Name. -type Node struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of a node. - // +optional - Spec NodeSpec - - // Status describes the current status of a Node - // +optional - Status NodeStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeList is a list of nodes. -type NodeList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Node -} - -// NamespaceSpec describes the attributes on a Namespace -type NamespaceSpec struct { - // Finalizers is an opaque list of values that must be empty to permanently remove object from storage - Finalizers []FinalizerName -} - -// FinalizerName is the name identifying a finalizer during namespace lifecycle. -type FinalizerName string - -// These are internal finalizer values to Kubernetes, must be qualified name unless defined here or -// in metav1. -const ( - FinalizerKubernetes FinalizerName = "kubernetes" -) - -// NamespaceStatus is information about the current status of a Namespace. -type NamespaceStatus struct { - // Phase is the current lifecycle phase of the namespace. - // +optional - Phase NamespacePhase -} - -type NamespacePhase string - -// These are the valid phases of a namespace. -const ( - // NamespaceActive means the namespace is available for use in the system - NamespaceActive NamespacePhase = "Active" - // NamespaceTerminating means the namespace is undergoing graceful termination - NamespaceTerminating NamespacePhase = "Terminating" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// A namespace provides a scope for Names. -// Use of multiple namespaces is optional -type Namespace struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the behavior of the Namespace. - // +optional - Spec NamespaceSpec - - // Status describes the current status of a Namespace - // +optional - Status NamespaceStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NamespaceList is a list of Namespaces. -type NamespaceList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Namespace -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Binding ties one object to another; for example, a pod is bound to a node by a scheduler. -// Deprecated in 1.7, please use the bindings subresource of pods instead. -type Binding struct { - metav1.TypeMeta - // ObjectMeta describes the object that is being bound. - // +optional - metav1.ObjectMeta - - // Target is the object to bind to. - Target ObjectReference -} - -// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. -type Preconditions struct { - // Specifies the target UID. - // +optional - UID *types.UID -} - -// DeletionPropagation decides whether and how garbage collection will be performed. -type DeletionPropagation string - -const ( - // Orphans the dependents. - DeletePropagationOrphan DeletionPropagation = "Orphan" - // Deletes the object from the key-value store, the garbage collector will delete the dependents in the background. 
- DeletePropagationBackground DeletionPropagation = "Background" - // The object exists in the key-value store until the garbage collector deletes all the dependents whose ownerReference.blockOwnerDeletion=true from the key-value store. - // API sever will put the "DeletingDependents" finalizer on the object, and sets its deletionTimestamp. - // This policy is cascading, i.e., the dependents will be deleted with Foreground. - DeletePropagationForeground DeletionPropagation = "Foreground" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DeleteOptions may be provided when deleting an API object -// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. -type DeleteOptions struct { - metav1.TypeMeta - - // Optional duration in seconds before the object should be deleted. Value must be non-negative integer. - // The value zero indicates delete immediately. If this value is nil, the default grace period for the - // specified type will be used. - // +optional - GracePeriodSeconds *int64 - - // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be - // returned. - // +optional - Preconditions *Preconditions - - // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. - // Should the dependent objects be orphaned. If true/false, the "orphan" - // finalizer will be added to/removed from the object's finalizers list. - // Either this field or PropagationPolicy may be set, but not both. - // +optional - OrphanDependents *bool - - // Whether and how garbage collection will be performed. - // Either this field or OrphanDependents may be set, but not both. - // The default policy is decided by the existing finalizer set in the - // metadata.finalizers and the resource-specific default policy. - // +optional - PropagationPolicy *DeletionPropagation -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ListOptions is the query options to a standard REST list call, and has future support for -// watch calls. -// DEPRECATED: This type has been moved to meta/v1 and will be removed soon. -type ListOptions struct { - metav1.TypeMeta - - // A selector based on labels - LabelSelector labels.Selector - // A selector based on fields - FieldSelector fields.Selector - - // If true, partially initialized resources are included in the response. - IncludeUninitialized bool - - // If true, watch for changes to this list - Watch bool - // When specified with a watch call, shows changes that occur after that particular version of a resource. - // Defaults to changes from the beginning of history. - // When specified for list: - // - if unset, then the result is returned from remote storage based on quorum-read flag; - // - if it's 0, then we simply return what we currently have in cache, no guarantee; - // - if set to non zero, then the result is at least as fresh as given rv. - ResourceVersion string - // Timeout for the list/watch call. - TimeoutSeconds *int64 -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodLogOptions is the query options for a Pod's logs REST call -type PodLogOptions struct { - metav1.TypeMeta - - // Container for which to return logs - Container string - // If true, follow the logs for the pod - Follow bool - // If true, return previous terminated container logs - Previous bool - // A relative time in seconds before the current time from which to show logs. 
If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - SinceSeconds *int64 - // An RFC3339 timestamp from which to show logs. If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - SinceTime *metav1.Time - // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line - // of log output. - Timestamps bool - // If set, the number of lines from the end of the logs to show. If not specified, - // logs are shown from the creation of the container or sinceSeconds or sinceTime - TailLines *int64 - // If set, the number of bytes to read from the server before terminating the - // log output. This may not display a complete final line of logging, and may return - // slightly more or slightly less than the specified limit. - LimitBytes *int64 -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodAttachOptions is the query options to a Pod's remote attach call -// TODO: merge w/ PodExecOptions below for stdin, stdout, etc -type PodAttachOptions struct { - metav1.TypeMeta - - // Stdin if true indicates that stdin is to be redirected for the attach call - // +optional - Stdin bool - - // Stdout if true indicates that stdout is to be redirected for the attach call - // +optional - Stdout bool - - // Stderr if true indicates that stderr is to be redirected for the attach call - // +optional - Stderr bool - - // TTY if true indicates that a tty will be allocated for the attach call - // +optional - TTY bool - - // Container to attach to. - // +optional - Container string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodExecOptions is the query options to a Pod's remote exec call -type PodExecOptions struct { - metav1.TypeMeta - - // Stdin if true indicates that stdin is to be redirected for the exec call - Stdin bool - - // Stdout if true indicates that stdout is to be redirected for the exec call - Stdout bool - - // Stderr if true indicates that stderr is to be redirected for the exec call - Stderr bool - - // TTY if true indicates that a tty will be allocated for the exec call - TTY bool - - // Container in which to execute the command. - Container string - - // Command is the remote command to execute; argv array; not executed within a shell. 
- Command []string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodPortForwardOptions is the query options to a Pod's port forward call -type PodPortForwardOptions struct { - metav1.TypeMeta - - // The list of ports to forward - // +optional - Ports []int32 -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodProxyOptions is the query options to a Pod's proxy call -type PodProxyOptions struct { - metav1.TypeMeta - - // Path is the URL path to use for the current proxy request - Path string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeProxyOptions is the query options to a Node's proxy call -type NodeProxyOptions struct { - metav1.TypeMeta - - // Path is the URL path to use for the current proxy request - Path string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceProxyOptions is the query options to a Service's proxy call. -type ServiceProxyOptions struct { - metav1.TypeMeta - - // Path is the part of URLs that include service endpoints, suffixes, - // and parameters to use for the current proxy request to service. - // For example, the whole request URL is - // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. - // Path is _search?q=user:kimchy. - Path string -} - -// ObjectReference contains enough information to let you inspect or modify the referred object. -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type ObjectReference struct { - // +optional - Kind string - // +optional - Namespace string - // +optional - Name string - // +optional - UID types.UID - // +optional - APIVersion string - // +optional - ResourceVersion string - - // Optional. If referring to a piece of an object instead of an entire object, this string - // should contain information to identify the sub-object. For example, if the object - // reference is to a container within a pod, this would take on a value like: - // "spec.containers{name}" (where "name" refers to the name of the container that triggered - // the event) or if no container name is specified "spec.containers[2]" (container with - // index 2 in this pod). This syntax is chosen only to have some well-defined way of - // referencing a part of an object. - // TODO: this design is not final and this field is subject to change in the future. - // +optional - FieldPath string -} - -// LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace. -type LocalObjectReference struct { - //TODO: Add other useful fields. apiVersion, kind, uid? - Name string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type SerializedReference struct { - metav1.TypeMeta - // +optional - Reference ObjectReference -} - -type EventSource struct { - // Component from which the event is generated. - // +optional - Component string - // Node name on which the event is generated. - // +optional - Host string -} - -// Valid values for event types (new types could be added in future) -const ( - // Information only and will not cause any problems - EventTypeNormal string = "Normal" - // These events are to warn that something might go wrong - EventTypeWarning string = "Warning" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Event is a report of an event somewhere in the cluster. 
-// TODO: Decide whether to store these separately or with the object they apply to. -type Event struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Required. The object that this event is about. - // +optional - InvolvedObject ObjectReference - - // Optional; this should be a short, machine understandable string that gives the reason - // for this event being generated. For example, if the event is reporting that a container - // can't start, the Reason might be "ImageNotFound". - // TODO: provide exact specification for format. - // +optional - Reason string - - // Optional. A human-readable description of the status of this operation. - // TODO: decide on maximum length. - // +optional - Message string - - // Optional. The component reporting this event. Should be a short machine understandable string. - // +optional - Source EventSource - - // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) - // +optional - FirstTimestamp metav1.Time - - // The time at which the most recent occurrence of this event was recorded. - // +optional - LastTimestamp metav1.Time - - // The number of times this event has occurred. - // +optional - Count int32 - - // Type of this event (Normal, Warning), new types could be added in the future. - // +optional - Type string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// EventList is a list of events. -type EventList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Event -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// List holds a list of objects, which may not be known by the server. -type List metainternalversion.List - -// A type of object that is limited -type LimitType string - -const ( - // Limit that applies to all pods in a namespace - LimitTypePod LimitType = "Pod" - // Limit that applies to all containers in a namespace - LimitTypeContainer LimitType = "Container" - // Limit that applies to all persistent volume claims in a namespace - LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim" -) - -// LimitRangeItem defines a min/max usage limit for any resource that matches on kind -type LimitRangeItem struct { - // Type of resource that this limit applies to - // +optional - Type LimitType - // Max usage constraints on this kind by resource name - // +optional - Max ResourceList - // Min usage constraints on this kind by resource name - // +optional - Min ResourceList - // Default resource requirement limit value by resource name. - // +optional - Default ResourceList - // DefaultRequest resource requirement request value by resource name. - // +optional - DefaultRequest ResourceList - // MaxLimitRequestRatio represents the max burst value for the named resource - // +optional - MaxLimitRequestRatio ResourceList -} - -// LimitRangeSpec defines a min/max usage limit for resources that match on kind -type LimitRangeSpec struct { - // Limits is the list of LimitRangeItem objects that are enforced - Limits []LimitRangeItem -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// LimitRange sets resource usage limits for each kind of resource in a Namespace -type LimitRange struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the limits enforced - // +optional - Spec LimitRangeSpec -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// LimitRangeList is a list of LimitRange items. 
-type LimitRangeList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - // Items is a list of LimitRange objects - Items []LimitRange -} - -// The following identify resource constants for Kubernetes object types -const ( - // Pods, number - ResourcePods ResourceName = "pods" - // Services, number - ResourceServices ResourceName = "services" - // ReplicationControllers, number - ResourceReplicationControllers ResourceName = "replicationcontrollers" - // ResourceQuotas, number - ResourceQuotas ResourceName = "resourcequotas" - // ResourceSecrets, number - ResourceSecrets ResourceName = "secrets" - // ResourceConfigMaps, number - ResourceConfigMaps ResourceName = "configmaps" - // ResourcePersistentVolumeClaims, number - ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims" - // ResourceServicesNodePorts, number - ResourceServicesNodePorts ResourceName = "services.nodeports" - // ResourceServicesLoadBalancers, number - ResourceServicesLoadBalancers ResourceName = "services.loadbalancers" - // CPU request, in cores. (500m = .5 cores) - ResourceRequestsCPU ResourceName = "requests.cpu" - // Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceRequestsMemory ResourceName = "requests.memory" - // Storage request, in bytes - ResourceRequestsStorage ResourceName = "requests.storage" - // Local ephemeral storage request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceRequestsEphemeralStorage ResourceName = "requests.ephemeral-storage" - // CPU limit, in cores. (500m = .5 cores) - ResourceLimitsCPU ResourceName = "limits.cpu" - // Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceLimitsMemory ResourceName = "limits.memory" - // Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage" -) - -// A ResourceQuotaScope defines a filter that must match each object tracked by a quota -type ResourceQuotaScope string - -const ( - // Match all pod objects where spec.activeDeadlineSeconds - ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating" - // Match all pod objects where !spec.activeDeadlineSeconds - ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating" - // Match all pod objects that have best effort quality of service - ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort" - // Match all pod objects that do not have best effort quality of service - ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort" -) - -// ResourceQuotaSpec defines the desired hard limits to enforce for Quota -type ResourceQuotaSpec struct { - // Hard is the set of desired hard limits for each named resource - // +optional - Hard ResourceList - // A collection of filters that must match each object tracked by a quota. - // If not specified, the quota matches all objects. 
- // +optional - Scopes []ResourceQuotaScope -} - -// ResourceQuotaStatus defines the enforced hard limits and observed use -type ResourceQuotaStatus struct { - // Hard is the set of enforced hard limits for each named resource - // +optional - Hard ResourceList - // Used is the current observed total usage of the resource in the namespace - // +optional - Used ResourceList -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ResourceQuota sets aggregate quota restrictions enforced per namespace -type ResourceQuota struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Spec defines the desired quota - // +optional - Spec ResourceQuotaSpec - - // Status defines the actual enforced quota and its current usage - // +optional - Status ResourceQuotaStatus -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ResourceQuotaList is a list of ResourceQuota items -type ResourceQuotaList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - // Items is a list of ResourceQuota objects - Items []ResourceQuota -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Secret holds secret data of a certain type. The total bytes of the values in -// the Data field must be less than MaxSecretSize bytes. -type Secret struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Data contains the secret data. Each key must consist of alphanumeric - // characters, '-', '_' or '.'. The serialized form of the secret data is a - // base64 encoded string, representing the arbitrary (possibly non-string) - // data value here. - // +optional - Data map[string][]byte - - // Used to facilitate programmatic handling of secret data. - // +optional - Type SecretType -} - -const MaxSecretSize = 1 * 1024 * 1024 - -type SecretType string - -const ( - // SecretTypeOpaque is the default; arbitrary user-defined data - SecretTypeOpaque SecretType = "Opaque" - - // SecretTypeServiceAccountToken contains a token that identifies a service account to the API - // - // Required fields: - // - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies - // - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies - // - Secret.Data["token"] - a token that identifies the service account to the API - SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token" - - // ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets - ServiceAccountNameKey = "kubernetes.io/service-account.name" - // ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets - ServiceAccountUIDKey = "kubernetes.io/service-account.uid" - // ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets - ServiceAccountTokenKey = "token" - // ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets - ServiceAccountKubeconfigKey = "kubernetes.kubeconfig" - // ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets - ServiceAccountRootCAKey = "ca.crt" - // ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls - ServiceAccountNamespaceKey = "namespace" - - // SecretTypeDockercfg contains a dockercfg file that 
follows the same format rules as ~/.dockercfg - // - // Required fields: - // - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file - SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg" - - // DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets - DockerConfigKey = ".dockercfg" - - // SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json - // - // Required fields: - // - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file - SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson" - - // DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets - DockerConfigJsonKey = ".dockerconfigjson" - - // SecretTypeBasicAuth contains data needed for basic authentication. - // - // Required at least one of fields: - // - Secret.Data["username"] - username used for authentication - // - Secret.Data["password"] - password or token needed for authentication - SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth" - - // BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets - BasicAuthUsernameKey = "username" - // BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets - BasicAuthPasswordKey = "password" - - // SecretTypeSSHAuth contains data needed for SSH authetication. - // - // Required field: - // - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication - SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth" - - // SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets - SSHAuthPrivateKey = "ssh-privatekey" - - // SecretTypeTLS contains information about a TLS client or server secret. It - // is primarily used with TLS termination of the Ingress resource, but may be - // used in other types. - // - // Required fields: - // - Secret.Data["tls.key"] - TLS private key. - // Secret.Data["tls.crt"] - TLS certificate. - // TODO: Consider supporting different formats, specifying CA/destinationCA. - SecretTypeTLS SecretType = "kubernetes.io/tls" - - // TLSCertKey is the key for tls certificates in a TLS secret. - TLSCertKey = "tls.crt" - // TLSPrivateKeyKey is the key for the private key field in a TLS secret. - TLSPrivateKeyKey = "tls.key" - // SecretTypeBootstrapToken is used during the automated bootstrap process (first - // implemented by kubeadm). It stores tokens that are used to sign well known - // ConfigMaps. They are used for authn. - SecretTypeBootstrapToken SecretType = "bootstrap.kubernetes.io/token" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type SecretList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []Secret -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ConfigMap holds configuration data for components or applications to consume. -type ConfigMap struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // Data contains the configuration data. - // Each key must consist of alphanumeric characters, '-', '_' or '.'. - // +optional - Data map[string]string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ConfigMapList is a resource containing a list of ConfigMap objects. -type ConfigMapList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - // Items is the list of ConfigMaps. 
- Items []ConfigMap -} - -// These constants are for remote command execution and port forwarding and are -// used by both the client side and server side components. -// -// This is probably not the ideal place for them, but it didn't seem worth it -// to create pkg/exec and pkg/portforward just to contain a single file with -// constants in it. Suggestions for more appropriate alternatives are -// definitely welcome! -const ( - // Enable stdin for remote command execution - ExecStdinParam = "input" - // Enable stdout for remote command execution - ExecStdoutParam = "output" - // Enable stderr for remote command execution - ExecStderrParam = "error" - // Enable TTY for remote command execution - ExecTTYParam = "tty" - // Command to run for remote command execution - ExecCommandParam = "command" - - // Name of header that specifies stream type - StreamType = "streamType" - // Value for streamType header for stdin stream - StreamTypeStdin = "stdin" - // Value for streamType header for stdout stream - StreamTypeStdout = "stdout" - // Value for streamType header for stderr stream - StreamTypeStderr = "stderr" - // Value for streamType header for data stream - StreamTypeData = "data" - // Value for streamType header for error stream - StreamTypeError = "error" - // Value for streamType header for terminal resize stream - StreamTypeResize = "resize" - - // Name of header that specifies the port being forwarded - PortHeader = "port" - // Name of header that specifies a request ID used to associate the error - // and data streams for a single forwarded connection - PortForwardRequestIDHeader = "requestID" -) - -// Type and constants for component health validation. -type ComponentConditionType string - -// These are the valid conditions for the component. -const ( - ComponentHealthy ComponentConditionType = "Healthy" -) - -type ComponentCondition struct { - Type ComponentConditionType - Status ConditionStatus - // +optional - Message string - // +optional - Error string -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ComponentStatus (and ComponentStatusList) holds the cluster validation info. -type ComponentStatus struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - - // +optional - Conditions []ComponentCondition -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type ComponentStatusList struct { - metav1.TypeMeta - // +optional - metav1.ListMeta - - Items []ComponentStatus -} - -// SecurityContext holds security configuration that will be applied to a container. -// Some fields are present in both SecurityContext and PodSecurityContext. When both -// are set, the values in SecurityContext take precedence. -type SecurityContext struct { - // The capabilities to add/drop when running containers. - // Defaults to the default set of capabilities granted by the container runtime. - // +optional - Capabilities *Capabilities - // Run container in privileged mode. - // Processes in privileged containers are essentially equivalent to root on the host. - // Defaults to false. - // +optional - Privileged *bool - // The SELinux context to be applied to the container. - // If unspecified, the container runtime will allocate a random SELinux context for each - // container. May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. 
- // +optional - SELinuxOptions *SELinuxOptions - // The UID to run the entrypoint of the container process. - // Defaults to user specified in image metadata if unspecified. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - RunAsUser *int64 - // Indicates that the container must run as a non-root user. - // If true, the Kubelet will validate the image at runtime to ensure that it - // does not run as UID 0 (root) and fail to start the container if it does. - // If unset or false, no such validation will be performed. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - RunAsNonRoot *bool - // The read-only root filesystem allows you to restrict the locations that an application can write - // files to, ensuring the persistent data can only be written to mounts. - // +optional - ReadOnlyRootFilesystem *bool - // AllowPrivilegeEscalation controls whether a process can gain more - // privileges than its parent process. This bool directly controls if - // the no_new_privs flag will be set on the container process. - // +optional - AllowPrivilegeEscalation *bool -} - -// SELinuxOptions are the labels to be applied to the container. -type SELinuxOptions struct { - // SELinux user label - // +optional - User string - // SELinux role label - // +optional - Role string - // SELinux type label - // +optional - Type string - // SELinux level label. - // +optional - Level string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// RangeAllocation is an opaque API object (not exposed to end users) that can be persisted to record -// the global allocation state of the cluster. The schema of Range and Data generic, in that Range -// should be a string representation of the inputs to a range (for instance, for IP allocation it -// might be a CIDR) and Data is an opaque blob understood by an allocator which is typically a -// binary range. Consumers should use annotations to record additional information (schema version, -// data encoding hints). A range allocation should *ALWAYS* be recreatable at any time by observation -// of the cluster, thus the object is less strongly typed than most. -type RangeAllocation struct { - metav1.TypeMeta - // +optional - metav1.ObjectMeta - // A string representing a unique label for a range of resources, such as a CIDR "10.0.0.0/8" or - // port range "10000-30000". Range is not strongly schema'd here. The Range is expected to define - // a start and end unless there is an implicit end. - Range string - // A byte array representing the serialized state of a range allocation. Additional clarifiers on - // the type or format of data should be represented with annotations. For IP allocations, this is - // represented as a bit array starting at the base IP of the CIDR in Range, with each bit representing - // a single allocated address (the fifth bit on CIDR 10.0.0.0/8 is 10.0.0.4). - Data []byte -} - -const ( - // "default-scheduler" is the name of default scheduler. - DefaultSchedulerName = "default-scheduler" - - // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule - // corresponding to every RequiredDuringScheduling affinity rule. 
- // When the --hard-pod-affinity-weight scheduler flag is not specified, - // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule. - DefaultHardPodAffinitySymmetricWeight int = 1 -) diff --git a/vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go deleted file mode 100644 index 938b7316a05f..000000000000 --- a/vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go +++ /dev/null @@ -1,6320 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by deepcopy-gen. Do not edit it manually! - -package api - -import ( - resource "k8s.io/apimachinery/pkg/api/resource" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - types "k8s.io/apimachinery/pkg/types" - reflect "reflect" -) - -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AWSElasticBlockStoreVolumeSource).DeepCopyInto(out.(*AWSElasticBlockStoreVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AWSElasticBlockStoreVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Affinity).DeepCopyInto(out.(*Affinity)) - return nil - }, InType: reflect.TypeOf(&Affinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AttachedVolume).DeepCopyInto(out.(*AttachedVolume)) - return nil - }, InType: reflect.TypeOf(&AttachedVolume{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AvoidPods).DeepCopyInto(out.(*AvoidPods)) - return nil - }, InType: reflect.TypeOf(&AvoidPods{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AzureDiskVolumeSource).DeepCopyInto(out.(*AzureDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AzureDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AzureFilePersistentVolumeSource).DeepCopyInto(out.(*AzureFilePersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AzureFilePersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AzureFileVolumeSource).DeepCopyInto(out.(*AzureFileVolumeSource)) - return nil - }, InType: 
reflect.TypeOf(&AzureFileVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Binding).DeepCopyInto(out.(*Binding)) - return nil - }, InType: reflect.TypeOf(&Binding{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Capabilities).DeepCopyInto(out.(*Capabilities)) - return nil - }, InType: reflect.TypeOf(&Capabilities{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CephFSPersistentVolumeSource).DeepCopyInto(out.(*CephFSPersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&CephFSPersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CephFSVolumeSource).DeepCopyInto(out.(*CephFSVolumeSource)) - return nil - }, InType: reflect.TypeOf(&CephFSVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CinderVolumeSource).DeepCopyInto(out.(*CinderVolumeSource)) - return nil - }, InType: reflect.TypeOf(&CinderVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClientIPConfig).DeepCopyInto(out.(*ClientIPConfig)) - return nil - }, InType: reflect.TypeOf(&ClientIPConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ComponentCondition).DeepCopyInto(out.(*ComponentCondition)) - return nil - }, InType: reflect.TypeOf(&ComponentCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ComponentStatus).DeepCopyInto(out.(*ComponentStatus)) - return nil - }, InType: reflect.TypeOf(&ComponentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ComponentStatusList).DeepCopyInto(out.(*ComponentStatusList)) - return nil - }, InType: reflect.TypeOf(&ComponentStatusList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMap).DeepCopyInto(out.(*ConfigMap)) - return nil - }, InType: reflect.TypeOf(&ConfigMap{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapEnvSource).DeepCopyInto(out.(*ConfigMapEnvSource)) - return nil - }, InType: reflect.TypeOf(&ConfigMapEnvSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapKeySelector).DeepCopyInto(out.(*ConfigMapKeySelector)) - return nil - }, InType: reflect.TypeOf(&ConfigMapKeySelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapList).DeepCopyInto(out.(*ConfigMapList)) - return nil - }, InType: reflect.TypeOf(&ConfigMapList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapProjection).DeepCopyInto(out.(*ConfigMapProjection)) - return nil - }, InType: reflect.TypeOf(&ConfigMapProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapVolumeSource).DeepCopyInto(out.(*ConfigMapVolumeSource)) - return nil - }, InType: 
reflect.TypeOf(&ConfigMapVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Container).DeepCopyInto(out.(*Container)) - return nil - }, InType: reflect.TypeOf(&Container{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerImage).DeepCopyInto(out.(*ContainerImage)) - return nil - }, InType: reflect.TypeOf(&ContainerImage{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerPort).DeepCopyInto(out.(*ContainerPort)) - return nil - }, InType: reflect.TypeOf(&ContainerPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerState).DeepCopyInto(out.(*ContainerState)) - return nil - }, InType: reflect.TypeOf(&ContainerState{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerStateRunning).DeepCopyInto(out.(*ContainerStateRunning)) - return nil - }, InType: reflect.TypeOf(&ContainerStateRunning{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerStateTerminated).DeepCopyInto(out.(*ContainerStateTerminated)) - return nil - }, InType: reflect.TypeOf(&ContainerStateTerminated{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerStateWaiting).DeepCopyInto(out.(*ContainerStateWaiting)) - return nil - }, InType: reflect.TypeOf(&ContainerStateWaiting{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerStatus).DeepCopyInto(out.(*ContainerStatus)) - return nil - }, InType: reflect.TypeOf(&ContainerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonEndpoint).DeepCopyInto(out.(*DaemonEndpoint)) - return nil - }, InType: reflect.TypeOf(&DaemonEndpoint{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeleteOptions).DeepCopyInto(out.(*DeleteOptions)) - return nil - }, InType: reflect.TypeOf(&DeleteOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DownwardAPIProjection).DeepCopyInto(out.(*DownwardAPIProjection)) - return nil - }, InType: reflect.TypeOf(&DownwardAPIProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DownwardAPIVolumeFile).DeepCopyInto(out.(*DownwardAPIVolumeFile)) - return nil - }, InType: reflect.TypeOf(&DownwardAPIVolumeFile{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DownwardAPIVolumeSource).DeepCopyInto(out.(*DownwardAPIVolumeSource)) - return nil - }, InType: reflect.TypeOf(&DownwardAPIVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EmptyDirVolumeSource).DeepCopyInto(out.(*EmptyDirVolumeSource)) - return nil - }, InType: reflect.TypeOf(&EmptyDirVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointAddress).DeepCopyInto(out.(*EndpointAddress)) - return 
nil - }, InType: reflect.TypeOf(&EndpointAddress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointPort).DeepCopyInto(out.(*EndpointPort)) - return nil - }, InType: reflect.TypeOf(&EndpointPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointSubset).DeepCopyInto(out.(*EndpointSubset)) - return nil - }, InType: reflect.TypeOf(&EndpointSubset{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Endpoints).DeepCopyInto(out.(*Endpoints)) - return nil - }, InType: reflect.TypeOf(&Endpoints{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointsList).DeepCopyInto(out.(*EndpointsList)) - return nil - }, InType: reflect.TypeOf(&EndpointsList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EnvFromSource).DeepCopyInto(out.(*EnvFromSource)) - return nil - }, InType: reflect.TypeOf(&EnvFromSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EnvVar).DeepCopyInto(out.(*EnvVar)) - return nil - }, InType: reflect.TypeOf(&EnvVar{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EnvVarSource).DeepCopyInto(out.(*EnvVarSource)) - return nil - }, InType: reflect.TypeOf(&EnvVarSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Event).DeepCopyInto(out.(*Event)) - return nil - }, InType: reflect.TypeOf(&Event{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EventList).DeepCopyInto(out.(*EventList)) - return nil - }, InType: reflect.TypeOf(&EventList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EventSource).DeepCopyInto(out.(*EventSource)) - return nil - }, InType: reflect.TypeOf(&EventSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExecAction).DeepCopyInto(out.(*ExecAction)) - return nil - }, InType: reflect.TypeOf(&ExecAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FCVolumeSource).DeepCopyInto(out.(*FCVolumeSource)) - return nil - }, InType: reflect.TypeOf(&FCVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FlexVolumeSource).DeepCopyInto(out.(*FlexVolumeSource)) - return nil - }, InType: reflect.TypeOf(&FlexVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FlockerVolumeSource).DeepCopyInto(out.(*FlockerVolumeSource)) - return nil - }, InType: reflect.TypeOf(&FlockerVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GCEPersistentDiskVolumeSource).DeepCopyInto(out.(*GCEPersistentDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&GCEPersistentDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - 
in.(*GitRepoVolumeSource).DeepCopyInto(out.(*GitRepoVolumeSource)) - return nil - }, InType: reflect.TypeOf(&GitRepoVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GlusterfsVolumeSource).DeepCopyInto(out.(*GlusterfsVolumeSource)) - return nil - }, InType: reflect.TypeOf(&GlusterfsVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HTTPGetAction).DeepCopyInto(out.(*HTTPGetAction)) - return nil - }, InType: reflect.TypeOf(&HTTPGetAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HTTPHeader).DeepCopyInto(out.(*HTTPHeader)) - return nil - }, InType: reflect.TypeOf(&HTTPHeader{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Handler).DeepCopyInto(out.(*Handler)) - return nil - }, InType: reflect.TypeOf(&Handler{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HostAlias).DeepCopyInto(out.(*HostAlias)) - return nil - }, InType: reflect.TypeOf(&HostAlias{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HostPathVolumeSource).DeepCopyInto(out.(*HostPathVolumeSource)) - return nil - }, InType: reflect.TypeOf(&HostPathVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ISCSIVolumeSource).DeepCopyInto(out.(*ISCSIVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ISCSIVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KeyToPath).DeepCopyInto(out.(*KeyToPath)) - return nil - }, InType: reflect.TypeOf(&KeyToPath{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Lifecycle).DeepCopyInto(out.(*Lifecycle)) - return nil - }, InType: reflect.TypeOf(&Lifecycle{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRange).DeepCopyInto(out.(*LimitRange)) - return nil - }, InType: reflect.TypeOf(&LimitRange{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRangeItem).DeepCopyInto(out.(*LimitRangeItem)) - return nil - }, InType: reflect.TypeOf(&LimitRangeItem{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRangeList).DeepCopyInto(out.(*LimitRangeList)) - return nil - }, InType: reflect.TypeOf(&LimitRangeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRangeSpec).DeepCopyInto(out.(*LimitRangeSpec)) - return nil - }, InType: reflect.TypeOf(&LimitRangeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*List).DeepCopyInto(out.(*List)) - return nil - }, InType: reflect.TypeOf(&List{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ListOptions).DeepCopyInto(out.(*ListOptions)) - return nil - }, InType: reflect.TypeOf(&ListOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c 
*conversion.Cloner) error { - in.(*LoadBalancerIngress).DeepCopyInto(out.(*LoadBalancerIngress)) - return nil - }, InType: reflect.TypeOf(&LoadBalancerIngress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LoadBalancerStatus).DeepCopyInto(out.(*LoadBalancerStatus)) - return nil - }, InType: reflect.TypeOf(&LoadBalancerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LocalObjectReference).DeepCopyInto(out.(*LocalObjectReference)) - return nil - }, InType: reflect.TypeOf(&LocalObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LocalVolumeSource).DeepCopyInto(out.(*LocalVolumeSource)) - return nil - }, InType: reflect.TypeOf(&LocalVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NFSVolumeSource).DeepCopyInto(out.(*NFSVolumeSource)) - return nil - }, InType: reflect.TypeOf(&NFSVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Namespace).DeepCopyInto(out.(*Namespace)) - return nil - }, InType: reflect.TypeOf(&Namespace{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NamespaceList).DeepCopyInto(out.(*NamespaceList)) - return nil - }, InType: reflect.TypeOf(&NamespaceList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NamespaceSpec).DeepCopyInto(out.(*NamespaceSpec)) - return nil - }, InType: reflect.TypeOf(&NamespaceSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NamespaceStatus).DeepCopyInto(out.(*NamespaceStatus)) - return nil - }, InType: reflect.TypeOf(&NamespaceStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Node).DeepCopyInto(out.(*Node)) - return nil - }, InType: reflect.TypeOf(&Node{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeAddress).DeepCopyInto(out.(*NodeAddress)) - return nil - }, InType: reflect.TypeOf(&NodeAddress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeAffinity).DeepCopyInto(out.(*NodeAffinity)) - return nil - }, InType: reflect.TypeOf(&NodeAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeCondition).DeepCopyInto(out.(*NodeCondition)) - return nil - }, InType: reflect.TypeOf(&NodeCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeConfigSource).DeepCopyInto(out.(*NodeConfigSource)) - return nil - }, InType: reflect.TypeOf(&NodeConfigSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeDaemonEndpoints).DeepCopyInto(out.(*NodeDaemonEndpoints)) - return nil - }, InType: reflect.TypeOf(&NodeDaemonEndpoints{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeList).DeepCopyInto(out.(*NodeList)) - return nil - }, InType: 
reflect.TypeOf(&NodeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeProxyOptions).DeepCopyInto(out.(*NodeProxyOptions)) - return nil - }, InType: reflect.TypeOf(&NodeProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeResources).DeepCopyInto(out.(*NodeResources)) - return nil - }, InType: reflect.TypeOf(&NodeResources{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSelector).DeepCopyInto(out.(*NodeSelector)) - return nil - }, InType: reflect.TypeOf(&NodeSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSelectorRequirement).DeepCopyInto(out.(*NodeSelectorRequirement)) - return nil - }, InType: reflect.TypeOf(&NodeSelectorRequirement{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSelectorTerm).DeepCopyInto(out.(*NodeSelectorTerm)) - return nil - }, InType: reflect.TypeOf(&NodeSelectorTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSpec).DeepCopyInto(out.(*NodeSpec)) - return nil - }, InType: reflect.TypeOf(&NodeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeStatus).DeepCopyInto(out.(*NodeStatus)) - return nil - }, InType: reflect.TypeOf(&NodeStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSystemInfo).DeepCopyInto(out.(*NodeSystemInfo)) - return nil - }, InType: reflect.TypeOf(&NodeSystemInfo{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectFieldSelector).DeepCopyInto(out.(*ObjectFieldSelector)) - return nil - }, InType: reflect.TypeOf(&ObjectFieldSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectMeta).DeepCopyInto(out.(*ObjectMeta)) - return nil - }, InType: reflect.TypeOf(&ObjectMeta{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectReference).DeepCopyInto(out.(*ObjectReference)) - return nil - }, InType: reflect.TypeOf(&ObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolume).DeepCopyInto(out.(*PersistentVolume)) - return nil - }, InType: reflect.TypeOf(&PersistentVolume{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaim).DeepCopyInto(out.(*PersistentVolumeClaim)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaim{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimCondition).DeepCopyInto(out.(*PersistentVolumeClaimCondition)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimList).DeepCopyInto(out.(*PersistentVolumeClaimList)) - return nil - }, InType: 
reflect.TypeOf(&PersistentVolumeClaimList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimSpec).DeepCopyInto(out.(*PersistentVolumeClaimSpec)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimStatus).DeepCopyInto(out.(*PersistentVolumeClaimStatus)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimVolumeSource).DeepCopyInto(out.(*PersistentVolumeClaimVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeList).DeepCopyInto(out.(*PersistentVolumeList)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeSource).DeepCopyInto(out.(*PersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeSpec).DeepCopyInto(out.(*PersistentVolumeSpec)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeStatus).DeepCopyInto(out.(*PersistentVolumeStatus)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PhotonPersistentDiskVolumeSource).DeepCopyInto(out.(*PhotonPersistentDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PhotonPersistentDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Pod).DeepCopyInto(out.(*Pod)) - return nil - }, InType: reflect.TypeOf(&Pod{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAffinity).DeepCopyInto(out.(*PodAffinity)) - return nil - }, InType: reflect.TypeOf(&PodAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAffinityTerm).DeepCopyInto(out.(*PodAffinityTerm)) - return nil - }, InType: reflect.TypeOf(&PodAffinityTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAntiAffinity).DeepCopyInto(out.(*PodAntiAffinity)) - return nil - }, InType: reflect.TypeOf(&PodAntiAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAttachOptions).DeepCopyInto(out.(*PodAttachOptions)) - return nil - }, InType: reflect.TypeOf(&PodAttachOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodCondition).DeepCopyInto(out.(*PodCondition)) - return nil - }, InType: reflect.TypeOf(&PodCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c 
*conversion.Cloner) error { - in.(*PodExecOptions).DeepCopyInto(out.(*PodExecOptions)) - return nil - }, InType: reflect.TypeOf(&PodExecOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodList).DeepCopyInto(out.(*PodList)) - return nil - }, InType: reflect.TypeOf(&PodList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodLogOptions).DeepCopyInto(out.(*PodLogOptions)) - return nil - }, InType: reflect.TypeOf(&PodLogOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodPortForwardOptions).DeepCopyInto(out.(*PodPortForwardOptions)) - return nil - }, InType: reflect.TypeOf(&PodPortForwardOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodProxyOptions).DeepCopyInto(out.(*PodProxyOptions)) - return nil - }, InType: reflect.TypeOf(&PodProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSecurityContext).DeepCopyInto(out.(*PodSecurityContext)) - return nil - }, InType: reflect.TypeOf(&PodSecurityContext{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSignature).DeepCopyInto(out.(*PodSignature)) - return nil - }, InType: reflect.TypeOf(&PodSignature{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSpec).DeepCopyInto(out.(*PodSpec)) - return nil - }, InType: reflect.TypeOf(&PodSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodStatus).DeepCopyInto(out.(*PodStatus)) - return nil - }, InType: reflect.TypeOf(&PodStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodStatusResult).DeepCopyInto(out.(*PodStatusResult)) - return nil - }, InType: reflect.TypeOf(&PodStatusResult{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodTemplate).DeepCopyInto(out.(*PodTemplate)) - return nil - }, InType: reflect.TypeOf(&PodTemplate{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodTemplateList).DeepCopyInto(out.(*PodTemplateList)) - return nil - }, InType: reflect.TypeOf(&PodTemplateList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodTemplateSpec).DeepCopyInto(out.(*PodTemplateSpec)) - return nil - }, InType: reflect.TypeOf(&PodTemplateSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PortworxVolumeSource).DeepCopyInto(out.(*PortworxVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PortworxVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Preconditions).DeepCopyInto(out.(*Preconditions)) - return nil - }, InType: reflect.TypeOf(&Preconditions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PreferAvoidPodsEntry).DeepCopyInto(out.(*PreferAvoidPodsEntry)) - return nil - }, InType: 
reflect.TypeOf(&PreferAvoidPodsEntry{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PreferredSchedulingTerm).DeepCopyInto(out.(*PreferredSchedulingTerm)) - return nil - }, InType: reflect.TypeOf(&PreferredSchedulingTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Probe).DeepCopyInto(out.(*Probe)) - return nil - }, InType: reflect.TypeOf(&Probe{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ProjectedVolumeSource).DeepCopyInto(out.(*ProjectedVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ProjectedVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*QuobyteVolumeSource).DeepCopyInto(out.(*QuobyteVolumeSource)) - return nil - }, InType: reflect.TypeOf(&QuobyteVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RBDVolumeSource).DeepCopyInto(out.(*RBDVolumeSource)) - return nil - }, InType: reflect.TypeOf(&RBDVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RangeAllocation).DeepCopyInto(out.(*RangeAllocation)) - return nil - }, InType: reflect.TypeOf(&RangeAllocation{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationController).DeepCopyInto(out.(*ReplicationController)) - return nil - }, InType: reflect.TypeOf(&ReplicationController{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerCondition).DeepCopyInto(out.(*ReplicationControllerCondition)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerList).DeepCopyInto(out.(*ReplicationControllerList)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerSpec).DeepCopyInto(out.(*ReplicationControllerSpec)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerStatus).DeepCopyInto(out.(*ReplicationControllerStatus)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceFieldSelector).DeepCopyInto(out.(*ResourceFieldSelector)) - return nil - }, InType: reflect.TypeOf(&ResourceFieldSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuota).DeepCopyInto(out.(*ResourceQuota)) - return nil - }, InType: reflect.TypeOf(&ResourceQuota{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuotaList).DeepCopyInto(out.(*ResourceQuotaList)) - return nil - }, InType: reflect.TypeOf(&ResourceQuotaList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, 
c *conversion.Cloner) error { - in.(*ResourceQuotaSpec).DeepCopyInto(out.(*ResourceQuotaSpec)) - return nil - }, InType: reflect.TypeOf(&ResourceQuotaSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuotaStatus).DeepCopyInto(out.(*ResourceQuotaStatus)) - return nil - }, InType: reflect.TypeOf(&ResourceQuotaStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceRequirements).DeepCopyInto(out.(*ResourceRequirements)) - return nil - }, InType: reflect.TypeOf(&ResourceRequirements{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SELinuxOptions).DeepCopyInto(out.(*SELinuxOptions)) - return nil - }, InType: reflect.TypeOf(&SELinuxOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleIOVolumeSource).DeepCopyInto(out.(*ScaleIOVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ScaleIOVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Secret).DeepCopyInto(out.(*Secret)) - return nil - }, InType: reflect.TypeOf(&Secret{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretEnvSource).DeepCopyInto(out.(*SecretEnvSource)) - return nil - }, InType: reflect.TypeOf(&SecretEnvSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretKeySelector).DeepCopyInto(out.(*SecretKeySelector)) - return nil - }, InType: reflect.TypeOf(&SecretKeySelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretList).DeepCopyInto(out.(*SecretList)) - return nil - }, InType: reflect.TypeOf(&SecretList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretProjection).DeepCopyInto(out.(*SecretProjection)) - return nil - }, InType: reflect.TypeOf(&SecretProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretReference).DeepCopyInto(out.(*SecretReference)) - return nil - }, InType: reflect.TypeOf(&SecretReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretVolumeSource).DeepCopyInto(out.(*SecretVolumeSource)) - return nil - }, InType: reflect.TypeOf(&SecretVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecurityContext).DeepCopyInto(out.(*SecurityContext)) - return nil - }, InType: reflect.TypeOf(&SecurityContext{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SerializedReference).DeepCopyInto(out.(*SerializedReference)) - return nil - }, InType: reflect.TypeOf(&SerializedReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Service).DeepCopyInto(out.(*Service)) - return nil - }, InType: reflect.TypeOf(&Service{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - 
in.(*ServiceAccount).DeepCopyInto(out.(*ServiceAccount)) - return nil - }, InType: reflect.TypeOf(&ServiceAccount{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceAccountList).DeepCopyInto(out.(*ServiceAccountList)) - return nil - }, InType: reflect.TypeOf(&ServiceAccountList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceList).DeepCopyInto(out.(*ServiceList)) - return nil - }, InType: reflect.TypeOf(&ServiceList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServicePort).DeepCopyInto(out.(*ServicePort)) - return nil - }, InType: reflect.TypeOf(&ServicePort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceProxyOptions).DeepCopyInto(out.(*ServiceProxyOptions)) - return nil - }, InType: reflect.TypeOf(&ServiceProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceSpec).DeepCopyInto(out.(*ServiceSpec)) - return nil - }, InType: reflect.TypeOf(&ServiceSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceStatus).DeepCopyInto(out.(*ServiceStatus)) - return nil - }, InType: reflect.TypeOf(&ServiceStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SessionAffinityConfig).DeepCopyInto(out.(*SessionAffinityConfig)) - return nil - }, InType: reflect.TypeOf(&SessionAffinityConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StorageOSPersistentVolumeSource).DeepCopyInto(out.(*StorageOSPersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&StorageOSPersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StorageOSVolumeSource).DeepCopyInto(out.(*StorageOSVolumeSource)) - return nil - }, InType: reflect.TypeOf(&StorageOSVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Sysctl).DeepCopyInto(out.(*Sysctl)) - return nil - }, InType: reflect.TypeOf(&Sysctl{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TCPSocketAction).DeepCopyInto(out.(*TCPSocketAction)) - return nil - }, InType: reflect.TypeOf(&TCPSocketAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Taint).DeepCopyInto(out.(*Taint)) - return nil - }, InType: reflect.TypeOf(&Taint{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Toleration).DeepCopyInto(out.(*Toleration)) - return nil - }, InType: reflect.TypeOf(&Toleration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Volume).DeepCopyInto(out.(*Volume)) - return nil - }, InType: reflect.TypeOf(&Volume{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VolumeMount).DeepCopyInto(out.(*VolumeMount)) - return nil - }, InType: reflect.TypeOf(&VolumeMount{})}, - 
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VolumeProjection).DeepCopyInto(out.(*VolumeProjection)) - return nil - }, InType: reflect.TypeOf(&VolumeProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VolumeSource).DeepCopyInto(out.(*VolumeSource)) - return nil - }, InType: reflect.TypeOf(&VolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VsphereVirtualDiskVolumeSource).DeepCopyInto(out.(*VsphereVirtualDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&VsphereVirtualDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*WeightedPodAffinityTerm).DeepCopyInto(out.(*WeightedPodAffinityTerm)) - return nil - }, InType: reflect.TypeOf(&WeightedPodAffinityTerm{})}, - ) -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSElasticBlockStoreVolumeSource) DeepCopyInto(out *AWSElasticBlockStoreVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSElasticBlockStoreVolumeSource. -func (in *AWSElasticBlockStoreVolumeSource) DeepCopy() *AWSElasticBlockStoreVolumeSource { - if in == nil { - return nil - } - out := new(AWSElasticBlockStoreVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Affinity) DeepCopyInto(out *Affinity) { - *out = *in - if in.NodeAffinity != nil { - in, out := &in.NodeAffinity, &out.NodeAffinity - if *in == nil { - *out = nil - } else { - *out = new(NodeAffinity) - (*in).DeepCopyInto(*out) - } - } - if in.PodAffinity != nil { - in, out := &in.PodAffinity, &out.PodAffinity - if *in == nil { - *out = nil - } else { - *out = new(PodAffinity) - (*in).DeepCopyInto(*out) - } - } - if in.PodAntiAffinity != nil { - in, out := &in.PodAntiAffinity, &out.PodAntiAffinity - if *in == nil { - *out = nil - } else { - *out = new(PodAntiAffinity) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Affinity. -func (in *Affinity) DeepCopy() *Affinity { - if in == nil { - return nil - } - out := new(Affinity) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AttachedVolume) DeepCopyInto(out *AttachedVolume) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachedVolume. -func (in *AttachedVolume) DeepCopy() *AttachedVolume { - if in == nil { - return nil - } - out := new(AttachedVolume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AvoidPods) DeepCopyInto(out *AvoidPods) { - *out = *in - if in.PreferAvoidPods != nil { - in, out := &in.PreferAvoidPods, &out.PreferAvoidPods - *out = make([]PreferAvoidPodsEntry, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvoidPods. 
-func (in *AvoidPods) DeepCopy() *AvoidPods { - if in == nil { - return nil - } - out := new(AvoidPods) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureDiskVolumeSource) DeepCopyInto(out *AzureDiskVolumeSource) { - *out = *in - if in.CachingMode != nil { - in, out := &in.CachingMode, &out.CachingMode - if *in == nil { - *out = nil - } else { - *out = new(AzureDataDiskCachingMode) - **out = **in - } - } - if in.FSType != nil { - in, out := &in.FSType, &out.FSType - if *in == nil { - *out = nil - } else { - *out = new(string) - **out = **in - } - } - if in.ReadOnly != nil { - in, out := &in.ReadOnly, &out.ReadOnly - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - if in.Kind != nil { - in, out := &in.Kind, &out.Kind - if *in == nil { - *out = nil - } else { - *out = new(AzureDataDiskKind) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDiskVolumeSource. -func (in *AzureDiskVolumeSource) DeepCopy() *AzureDiskVolumeSource { - if in == nil { - return nil - } - out := new(AzureDiskVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureFilePersistentVolumeSource) DeepCopyInto(out *AzureFilePersistentVolumeSource) { - *out = *in - if in.SecretNamespace != nil { - in, out := &in.SecretNamespace, &out.SecretNamespace - if *in == nil { - *out = nil - } else { - *out = new(string) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFilePersistentVolumeSource. -func (in *AzureFilePersistentVolumeSource) DeepCopy() *AzureFilePersistentVolumeSource { - if in == nil { - return nil - } - out := new(AzureFilePersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureFileVolumeSource) DeepCopyInto(out *AzureFileVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFileVolumeSource. -func (in *AzureFileVolumeSource) DeepCopy() *AzureFileVolumeSource { - if in == nil { - return nil - } - out := new(AzureFileVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Binding) DeepCopyInto(out *Binding) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Target = in.Target - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Binding. -func (in *Binding) DeepCopy() *Binding { - if in == nil { - return nil - } - out := new(Binding) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Binding) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Capabilities) DeepCopyInto(out *Capabilities) { - *out = *in - if in.Add != nil { - in, out := &in.Add, &out.Add - *out = make([]Capability, len(*in)) - copy(*out, *in) - } - if in.Drop != nil { - in, out := &in.Drop, &out.Drop - *out = make([]Capability, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Capabilities. -func (in *Capabilities) DeepCopy() *Capabilities { - if in == nil { - return nil - } - out := new(Capabilities) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephFSPersistentVolumeSource) DeepCopyInto(out *CephFSPersistentVolumeSource) { - *out = *in - if in.Monitors != nil { - in, out := &in.Monitors, &out.Monitors - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(SecretReference) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSPersistentVolumeSource. -func (in *CephFSPersistentVolumeSource) DeepCopy() *CephFSPersistentVolumeSource { - if in == nil { - return nil - } - out := new(CephFSPersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephFSVolumeSource) DeepCopyInto(out *CephFSVolumeSource) { - *out = *in - if in.Monitors != nil { - in, out := &in.Monitors, &out.Monitors - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(LocalObjectReference) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSVolumeSource. -func (in *CephFSVolumeSource) DeepCopy() *CephFSVolumeSource { - if in == nil { - return nil - } - out := new(CephFSVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CinderVolumeSource) DeepCopyInto(out *CinderVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CinderVolumeSource. -func (in *CinderVolumeSource) DeepCopy() *CinderVolumeSource { - if in == nil { - return nil - } - out := new(CinderVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) { - *out = *in - if in.TimeoutSeconds != nil { - in, out := &in.TimeoutSeconds, &out.TimeoutSeconds - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientIPConfig. -func (in *ClientIPConfig) DeepCopy() *ClientIPConfig { - if in == nil { - return nil - } - out := new(ClientIPConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ComponentCondition) DeepCopyInto(out *ComponentCondition) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentCondition. -func (in *ComponentCondition) DeepCopy() *ComponentCondition { - if in == nil { - return nil - } - out := new(ComponentCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ComponentStatus) DeepCopyInto(out *ComponentStatus) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ComponentCondition, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatus. -func (in *ComponentStatus) DeepCopy() *ComponentStatus { - if in == nil { - return nil - } - out := new(ComponentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ComponentStatus) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ComponentStatusList) DeepCopyInto(out *ComponentStatusList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ComponentStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatusList. -func (in *ComponentStatusList) DeepCopy() *ComponentStatusList { - if in == nil { - return nil - } - out := new(ComponentStatusList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ComponentStatusList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMap) DeepCopyInto(out *ConfigMap) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMap. -func (in *ConfigMap) DeepCopy() *ConfigMap { - if in == nil { - return nil - } - out := new(ConfigMap) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ConfigMap) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ConfigMapEnvSource) DeepCopyInto(out *ConfigMapEnvSource) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapEnvSource. -func (in *ConfigMapEnvSource) DeepCopy() *ConfigMapEnvSource { - if in == nil { - return nil - } - out := new(ConfigMapEnvSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMapKeySelector) DeepCopyInto(out *ConfigMapKeySelector) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapKeySelector. -func (in *ConfigMapKeySelector) DeepCopy() *ConfigMapKeySelector { - if in == nil { - return nil - } - out := new(ConfigMapKeySelector) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMapList) DeepCopyInto(out *ConfigMapList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ConfigMap, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapList. -func (in *ConfigMapList) DeepCopy() *ConfigMapList { - if in == nil { - return nil - } - out := new(ConfigMapList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ConfigMapList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMapProjection) DeepCopyInto(out *ConfigMapProjection) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapProjection. -func (in *ConfigMapProjection) DeepCopy() *ConfigMapProjection { - if in == nil { - return nil - } - out := new(ConfigMapProjection) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ConfigMapVolumeSource) DeepCopyInto(out *ConfigMapVolumeSource) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.DefaultMode != nil { - in, out := &in.DefaultMode, &out.DefaultMode - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapVolumeSource. -func (in *ConfigMapVolumeSource) DeepCopy() *ConfigMapVolumeSource { - if in == nil { - return nil - } - out := new(ConfigMapVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Container) DeepCopyInto(out *Container) { - *out = *in - if in.Command != nil { - in, out := &in.Command, &out.Command - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]ContainerPort, len(*in)) - copy(*out, *in) - } - if in.EnvFrom != nil { - in, out := &in.EnvFrom, &out.EnvFrom - *out = make([]EnvFromSource, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.Resources.DeepCopyInto(&out.Resources) - if in.VolumeMounts != nil { - in, out := &in.VolumeMounts, &out.VolumeMounts - *out = make([]VolumeMount, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.LivenessProbe != nil { - in, out := &in.LivenessProbe, &out.LivenessProbe - if *in == nil { - *out = nil - } else { - *out = new(Probe) - (*in).DeepCopyInto(*out) - } - } - if in.ReadinessProbe != nil { - in, out := &in.ReadinessProbe, &out.ReadinessProbe - if *in == nil { - *out = nil - } else { - *out = new(Probe) - (*in).DeepCopyInto(*out) - } - } - if in.Lifecycle != nil { - in, out := &in.Lifecycle, &out.Lifecycle - if *in == nil { - *out = nil - } else { - *out = new(Lifecycle) - (*in).DeepCopyInto(*out) - } - } - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - if *in == nil { - *out = nil - } else { - *out = new(SecurityContext) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Container. -func (in *Container) DeepCopy() *Container { - if in == nil { - return nil - } - out := new(Container) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerImage) DeepCopyInto(out *ContainerImage) { - *out = *in - if in.Names != nil { - in, out := &in.Names, &out.Names - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerImage. 
-func (in *ContainerImage) DeepCopy() *ContainerImage { - if in == nil { - return nil - } - out := new(ContainerImage) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerPort) DeepCopyInto(out *ContainerPort) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerPort. -func (in *ContainerPort) DeepCopy() *ContainerPort { - if in == nil { - return nil - } - out := new(ContainerPort) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerState) DeepCopyInto(out *ContainerState) { - *out = *in - if in.Waiting != nil { - in, out := &in.Waiting, &out.Waiting - if *in == nil { - *out = nil - } else { - *out = new(ContainerStateWaiting) - **out = **in - } - } - if in.Running != nil { - in, out := &in.Running, &out.Running - if *in == nil { - *out = nil - } else { - *out = new(ContainerStateRunning) - (*in).DeepCopyInto(*out) - } - } - if in.Terminated != nil { - in, out := &in.Terminated, &out.Terminated - if *in == nil { - *out = nil - } else { - *out = new(ContainerStateTerminated) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerState. -func (in *ContainerState) DeepCopy() *ContainerState { - if in == nil { - return nil - } - out := new(ContainerState) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerStateRunning) DeepCopyInto(out *ContainerStateRunning) { - *out = *in - in.StartedAt.DeepCopyInto(&out.StartedAt) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateRunning. -func (in *ContainerStateRunning) DeepCopy() *ContainerStateRunning { - if in == nil { - return nil - } - out := new(ContainerStateRunning) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerStateTerminated) DeepCopyInto(out *ContainerStateTerminated) { - *out = *in - in.StartedAt.DeepCopyInto(&out.StartedAt) - in.FinishedAt.DeepCopyInto(&out.FinishedAt) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateTerminated. -func (in *ContainerStateTerminated) DeepCopy() *ContainerStateTerminated { - if in == nil { - return nil - } - out := new(ContainerStateTerminated) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ContainerStateWaiting) DeepCopyInto(out *ContainerStateWaiting) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateWaiting. -func (in *ContainerStateWaiting) DeepCopy() *ContainerStateWaiting { - if in == nil { - return nil - } - out := new(ContainerStateWaiting) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) { - *out = *in - in.State.DeepCopyInto(&out.State) - in.LastTerminationState.DeepCopyInto(&out.LastTerminationState) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStatus. -func (in *ContainerStatus) DeepCopy() *ContainerStatus { - if in == nil { - return nil - } - out := new(ContainerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DaemonEndpoint) DeepCopyInto(out *DaemonEndpoint) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonEndpoint. -func (in *DaemonEndpoint) DeepCopy() *DaemonEndpoint { - if in == nil { - return nil - } - out := new(DaemonEndpoint) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeleteOptions) DeepCopyInto(out *DeleteOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.GracePeriodSeconds != nil { - in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - if in.Preconditions != nil { - in, out := &in.Preconditions, &out.Preconditions - if *in == nil { - *out = nil - } else { - *out = new(Preconditions) - (*in).DeepCopyInto(*out) - } - } - if in.OrphanDependents != nil { - in, out := &in.OrphanDependents, &out.OrphanDependents - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - if in.PropagationPolicy != nil { - in, out := &in.PropagationPolicy, &out.PropagationPolicy - if *in == nil { - *out = nil - } else { - *out = new(DeletionPropagation) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteOptions. -func (in *DeleteOptions) DeepCopy() *DeleteOptions { - if in == nil { - return nil - } - out := new(DeleteOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DeleteOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DownwardAPIProjection) DeepCopyInto(out *DownwardAPIProjection) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]DownwardAPIVolumeFile, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIProjection. -func (in *DownwardAPIProjection) DeepCopy() *DownwardAPIProjection { - if in == nil { - return nil - } - out := new(DownwardAPIProjection) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DownwardAPIVolumeFile) DeepCopyInto(out *DownwardAPIVolumeFile) { - *out = *in - if in.FieldRef != nil { - in, out := &in.FieldRef, &out.FieldRef - if *in == nil { - *out = nil - } else { - *out = new(ObjectFieldSelector) - **out = **in - } - } - if in.ResourceFieldRef != nil { - in, out := &in.ResourceFieldRef, &out.ResourceFieldRef - if *in == nil { - *out = nil - } else { - *out = new(ResourceFieldSelector) - (*in).DeepCopyInto(*out) - } - } - if in.Mode != nil { - in, out := &in.Mode, &out.Mode - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeFile. -func (in *DownwardAPIVolumeFile) DeepCopy() *DownwardAPIVolumeFile { - if in == nil { - return nil - } - out := new(DownwardAPIVolumeFile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DownwardAPIVolumeSource) DeepCopyInto(out *DownwardAPIVolumeSource) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]DownwardAPIVolumeFile, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.DefaultMode != nil { - in, out := &in.DefaultMode, &out.DefaultMode - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeSource. -func (in *DownwardAPIVolumeSource) DeepCopy() *DownwardAPIVolumeSource { - if in == nil { - return nil - } - out := new(DownwardAPIVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EmptyDirVolumeSource) DeepCopyInto(out *EmptyDirVolumeSource) { - *out = *in - if in.SizeLimit != nil { - in, out := &in.SizeLimit, &out.SizeLimit - if *in == nil { - *out = nil - } else { - *out = new(resource.Quantity) - **out = (*in).DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyDirVolumeSource. -func (in *EmptyDirVolumeSource) DeepCopy() *EmptyDirVolumeSource { - if in == nil { - return nil - } - out := new(EmptyDirVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EndpointAddress) DeepCopyInto(out *EndpointAddress) { - *out = *in - if in.NodeName != nil { - in, out := &in.NodeName, &out.NodeName - if *in == nil { - *out = nil - } else { - *out = new(string) - **out = **in - } - } - if in.TargetRef != nil { - in, out := &in.TargetRef, &out.TargetRef - if *in == nil { - *out = nil - } else { - *out = new(ObjectReference) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointAddress. -func (in *EndpointAddress) DeepCopy() *EndpointAddress { - if in == nil { - return nil - } - out := new(EndpointAddress) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EndpointPort) DeepCopyInto(out *EndpointPort) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort. 
-func (in *EndpointPort) DeepCopy() *EndpointPort { - if in == nil { - return nil - } - out := new(EndpointPort) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EndpointSubset) DeepCopyInto(out *EndpointSubset) { - *out = *in - if in.Addresses != nil { - in, out := &in.Addresses, &out.Addresses - *out = make([]EndpointAddress, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.NotReadyAddresses != nil { - in, out := &in.NotReadyAddresses, &out.NotReadyAddresses - *out = make([]EndpointAddress, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]EndpointPort, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSubset. -func (in *EndpointSubset) DeepCopy() *EndpointSubset { - if in == nil { - return nil - } - out := new(EndpointSubset) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Endpoints) DeepCopyInto(out *Endpoints) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Subsets != nil { - in, out := &in.Subsets, &out.Subsets - *out = make([]EndpointSubset, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoints. -func (in *Endpoints) DeepCopy() *Endpoints { - if in == nil { - return nil - } - out := new(Endpoints) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Endpoints) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EndpointsList) DeepCopyInto(out *EndpointsList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Endpoints, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsList. -func (in *EndpointsList) DeepCopy() *EndpointsList { - if in == nil { - return nil - } - out := new(EndpointsList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *EndpointsList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *EnvFromSource) DeepCopyInto(out *EnvFromSource) { - *out = *in - if in.ConfigMapRef != nil { - in, out := &in.ConfigMapRef, &out.ConfigMapRef - if *in == nil { - *out = nil - } else { - *out = new(ConfigMapEnvSource) - (*in).DeepCopyInto(*out) - } - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(SecretEnvSource) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvFromSource. -func (in *EnvFromSource) DeepCopy() *EnvFromSource { - if in == nil { - return nil - } - out := new(EnvFromSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EnvVar) DeepCopyInto(out *EnvVar) { - *out = *in - if in.ValueFrom != nil { - in, out := &in.ValueFrom, &out.ValueFrom - if *in == nil { - *out = nil - } else { - *out = new(EnvVarSource) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVar. -func (in *EnvVar) DeepCopy() *EnvVar { - if in == nil { - return nil - } - out := new(EnvVar) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EnvVarSource) DeepCopyInto(out *EnvVarSource) { - *out = *in - if in.FieldRef != nil { - in, out := &in.FieldRef, &out.FieldRef - if *in == nil { - *out = nil - } else { - *out = new(ObjectFieldSelector) - **out = **in - } - } - if in.ResourceFieldRef != nil { - in, out := &in.ResourceFieldRef, &out.ResourceFieldRef - if *in == nil { - *out = nil - } else { - *out = new(ResourceFieldSelector) - (*in).DeepCopyInto(*out) - } - } - if in.ConfigMapKeyRef != nil { - in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef - if *in == nil { - *out = nil - } else { - *out = new(ConfigMapKeySelector) - (*in).DeepCopyInto(*out) - } - } - if in.SecretKeyRef != nil { - in, out := &in.SecretKeyRef, &out.SecretKeyRef - if *in == nil { - *out = nil - } else { - *out = new(SecretKeySelector) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVarSource. -func (in *EnvVarSource) DeepCopy() *EnvVarSource { - if in == nil { - return nil - } - out := new(EnvVarSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Event) DeepCopyInto(out *Event) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.InvolvedObject = in.InvolvedObject - out.Source = in.Source - in.FirstTimestamp.DeepCopyInto(&out.FirstTimestamp) - in.LastTimestamp.DeepCopyInto(&out.LastTimestamp) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event. -func (in *Event) DeepCopy() *Event { - if in == nil { - return nil - } - out := new(Event) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Event) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *EventList) DeepCopyInto(out *EventList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Event, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList. -func (in *EventList) DeepCopy() *EventList { - if in == nil { - return nil - } - out := new(EventList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *EventList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EventSource) DeepCopyInto(out *EventSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSource. -func (in *EventSource) DeepCopy() *EventSource { - if in == nil { - return nil - } - out := new(EventSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExecAction) DeepCopyInto(out *ExecAction) { - *out = *in - if in.Command != nil { - in, out := &in.Command, &out.Command - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecAction. -func (in *ExecAction) DeepCopy() *ExecAction { - if in == nil { - return nil - } - out := new(ExecAction) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FCVolumeSource) DeepCopyInto(out *FCVolumeSource) { - *out = *in - if in.TargetWWNs != nil { - in, out := &in.TargetWWNs, &out.TargetWWNs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Lun != nil { - in, out := &in.Lun, &out.Lun - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } - } - if in.WWIDs != nil { - in, out := &in.WWIDs, &out.WWIDs - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FCVolumeSource. -func (in *FCVolumeSource) DeepCopy() *FCVolumeSource { - if in == nil { - return nil - } - out := new(FCVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlexVolumeSource) DeepCopyInto(out *FlexVolumeSource) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(LocalObjectReference) - **out = **in - } - } - if in.Options != nil { - in, out := &in.Options, &out.Options - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexVolumeSource. 
-func (in *FlexVolumeSource) DeepCopy() *FlexVolumeSource { - if in == nil { - return nil - } - out := new(FlexVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FlockerVolumeSource) DeepCopyInto(out *FlockerVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlockerVolumeSource. -func (in *FlockerVolumeSource) DeepCopy() *FlockerVolumeSource { - if in == nil { - return nil - } - out := new(FlockerVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GCEPersistentDiskVolumeSource) DeepCopyInto(out *GCEPersistentDiskVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCEPersistentDiskVolumeSource. -func (in *GCEPersistentDiskVolumeSource) DeepCopy() *GCEPersistentDiskVolumeSource { - if in == nil { - return nil - } - out := new(GCEPersistentDiskVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GitRepoVolumeSource) DeepCopyInto(out *GitRepoVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepoVolumeSource. -func (in *GitRepoVolumeSource) DeepCopy() *GitRepoVolumeSource { - if in == nil { - return nil - } - out := new(GitRepoVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GlusterfsVolumeSource) DeepCopyInto(out *GlusterfsVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsVolumeSource. -func (in *GlusterfsVolumeSource) DeepCopy() *GlusterfsVolumeSource { - if in == nil { - return nil - } - out := new(GlusterfsVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HTTPGetAction) DeepCopyInto(out *HTTPGetAction) { - *out = *in - out.Port = in.Port - if in.HTTPHeaders != nil { - in, out := &in.HTTPHeaders, &out.HTTPHeaders - *out = make([]HTTPHeader, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPGetAction. -func (in *HTTPGetAction) DeepCopy() *HTTPGetAction { - if in == nil { - return nil - } - out := new(HTTPGetAction) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HTTPHeader) DeepCopyInto(out *HTTPHeader) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeader. -func (in *HTTPHeader) DeepCopy() *HTTPHeader { - if in == nil { - return nil - } - out := new(HTTPHeader) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Handler) DeepCopyInto(out *Handler) { - *out = *in - if in.Exec != nil { - in, out := &in.Exec, &out.Exec - if *in == nil { - *out = nil - } else { - *out = new(ExecAction) - (*in).DeepCopyInto(*out) - } - } - if in.HTTPGet != nil { - in, out := &in.HTTPGet, &out.HTTPGet - if *in == nil { - *out = nil - } else { - *out = new(HTTPGetAction) - (*in).DeepCopyInto(*out) - } - } - if in.TCPSocket != nil { - in, out := &in.TCPSocket, &out.TCPSocket - if *in == nil { - *out = nil - } else { - *out = new(TCPSocketAction) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Handler. -func (in *Handler) DeepCopy() *Handler { - if in == nil { - return nil - } - out := new(Handler) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostAlias) DeepCopyInto(out *HostAlias) { - *out = *in - if in.Hostnames != nil { - in, out := &in.Hostnames, &out.Hostnames - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostAlias. -func (in *HostAlias) DeepCopy() *HostAlias { - if in == nil { - return nil - } - out := new(HostAlias) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostPathVolumeSource) DeepCopyInto(out *HostPathVolumeSource) { - *out = *in - if in.Type != nil { - in, out := &in.Type, &out.Type - if *in == nil { - *out = nil - } else { - *out = new(HostPathType) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPathVolumeSource. -func (in *HostPathVolumeSource) DeepCopy() *HostPathVolumeSource { - if in == nil { - return nil - } - out := new(HostPathVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ISCSIVolumeSource) DeepCopyInto(out *ISCSIVolumeSource) { - *out = *in - if in.Portals != nil { - in, out := &in.Portals, &out.Portals - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(LocalObjectReference) - **out = **in - } - } - if in.InitiatorName != nil { - in, out := &in.InitiatorName, &out.InitiatorName - if *in == nil { - *out = nil - } else { - *out = new(string) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIVolumeSource. -func (in *ISCSIVolumeSource) DeepCopy() *ISCSIVolumeSource { - if in == nil { - return nil - } - out := new(ISCSIVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KeyToPath) DeepCopyInto(out *KeyToPath) { - *out = *in - if in.Mode != nil { - in, out := &in.Mode, &out.Mode - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyToPath. 
-func (in *KeyToPath) DeepCopy() *KeyToPath { - if in == nil { - return nil - } - out := new(KeyToPath) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Lifecycle) DeepCopyInto(out *Lifecycle) { - *out = *in - if in.PostStart != nil { - in, out := &in.PostStart, &out.PostStart - if *in == nil { - *out = nil - } else { - *out = new(Handler) - (*in).DeepCopyInto(*out) - } - } - if in.PreStop != nil { - in, out := &in.PreStop, &out.PreStop - if *in == nil { - *out = nil - } else { - *out = new(Handler) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Lifecycle. -func (in *Lifecycle) DeepCopy() *Lifecycle { - if in == nil { - return nil - } - out := new(Lifecycle) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LimitRange) DeepCopyInto(out *LimitRange) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRange. -func (in *LimitRange) DeepCopy() *LimitRange { - if in == nil { - return nil - } - out := new(LimitRange) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *LimitRange) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LimitRangeItem) DeepCopyInto(out *LimitRangeItem) { - *out = *in - if in.Max != nil { - in, out := &in.Max, &out.Max - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Min != nil { - in, out := &in.Min, &out.Min - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Default != nil { - in, out := &in.Default, &out.Default - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.DefaultRequest != nil { - in, out := &in.DefaultRequest, &out.DefaultRequest - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.MaxLimitRequestRatio != nil { - in, out := &in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeItem. -func (in *LimitRangeItem) DeepCopy() *LimitRangeItem { - if in == nil { - return nil - } - out := new(LimitRangeItem) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *LimitRangeList) DeepCopyInto(out *LimitRangeList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]LimitRange, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeList. -func (in *LimitRangeList) DeepCopy() *LimitRangeList { - if in == nil { - return nil - } - out := new(LimitRangeList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *LimitRangeList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LimitRangeSpec) DeepCopyInto(out *LimitRangeSpec) { - *out = *in - if in.Limits != nil { - in, out := &in.Limits, &out.Limits - *out = make([]LimitRangeItem, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeSpec. -func (in *LimitRangeSpec) DeepCopy() *LimitRangeSpec { - if in == nil { - return nil - } - out := new(LimitRangeSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *List) DeepCopyInto(out *List) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]runtime.Object, len(*in)) - for i := range *in { - if (*in)[i] == nil { - (*out)[i] = nil - } else { - (*out)[i] = (*in)[i].DeepCopyObject() - } - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new List. -func (in *List) DeepCopy() *List { - if in == nil { - return nil - } - out := new(List) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *List) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ListOptions) DeepCopyInto(out *ListOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.LabelSelector == nil { - out.LabelSelector = nil - } else { - out.LabelSelector = in.LabelSelector.DeepCopySelector() - } - if in.FieldSelector == nil { - out.FieldSelector = nil - } else { - out.FieldSelector = in.FieldSelector.DeepCopySelector() - } - if in.TimeoutSeconds != nil { - in, out := &in.TimeoutSeconds, &out.TimeoutSeconds - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListOptions. -func (in *ListOptions) DeepCopy() *ListOptions { - if in == nil { - return nil - } - out := new(ListOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
[Hunk elided: the rest of this removed autogenerated deepcopy source deletes the generated DeepCopyInto, DeepCopy, and DeepCopyObject helpers for the remaining vendored Kubernetes core API types (ListOptions, LoadBalancerIngress/Status, LocalObjectReference, LocalVolumeSource, NFSVolumeSource, Namespace*, Node*, ObjectFieldSelector, ObjectMeta, ObjectReference, PersistentVolume*, PersistentVolumeClaim*, Pod*, PodTemplate*, Preconditions, PreferAvoidPodsEntry, PreferredSchedulingTerm, Probe, ProjectedVolumeSource, RangeAllocation, ReplicationController*, ResourceFieldSelector, ResourceQuota*, ResourceRequirements, SELinuxOptions, Secret*, and the related volume-source types such as Quobyte, RBD, ScaleIO, and Portworx). Every deleted function follows the same generated nil-check / allocate / field-copy pattern, sketched below.]
in must be non-nil. -func (in *SecretProjection) DeepCopyInto(out *SecretProjection) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretProjection. -func (in *SecretProjection) DeepCopy() *SecretProjection { - if in == nil { - return nil - } - out := new(SecretProjection) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecretReference) DeepCopyInto(out *SecretReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference. -func (in *SecretReference) DeepCopy() *SecretReference { - if in == nil { - return nil - } - out := new(SecretReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecretVolumeSource) DeepCopyInto(out *SecretVolumeSource) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.DefaultMode != nil { - in, out := &in.DefaultMode, &out.DefaultMode - if *in == nil { - *out = nil - } else { - *out = new(int32) - **out = **in - } - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretVolumeSource. -func (in *SecretVolumeSource) DeepCopy() *SecretVolumeSource { - if in == nil { - return nil - } - out := new(SecretVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *SecurityContext) DeepCopyInto(out *SecurityContext) { - *out = *in - if in.Capabilities != nil { - in, out := &in.Capabilities, &out.Capabilities - if *in == nil { - *out = nil - } else { - *out = new(Capabilities) - (*in).DeepCopyInto(*out) - } - } - if in.Privileged != nil { - in, out := &in.Privileged, &out.Privileged - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - if *in == nil { - *out = nil - } else { - *out = new(SELinuxOptions) - **out = **in - } - } - if in.RunAsUser != nil { - in, out := &in.RunAsUser, &out.RunAsUser - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - if in.RunAsNonRoot != nil { - in, out := &in.RunAsNonRoot, &out.RunAsNonRoot - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - if in.ReadOnlyRootFilesystem != nil { - in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - if in.AllowPrivilegeEscalation != nil { - in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityContext. -func (in *SecurityContext) DeepCopy() *SecurityContext { - if in == nil { - return nil - } - out := new(SecurityContext) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SerializedReference) DeepCopyInto(out *SerializedReference) { - *out = *in - out.TypeMeta = in.TypeMeta - out.Reference = in.Reference - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializedReference. -func (in *SerializedReference) DeepCopy() *SerializedReference { - if in == nil { - return nil - } - out := new(SerializedReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *SerializedReference) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Service) DeepCopyInto(out *Service) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service. -func (in *Service) DeepCopy() *Service { - if in == nil { - return nil - } - out := new(Service) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Service) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ServiceAccount) DeepCopyInto(out *ServiceAccount) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Secrets != nil { - in, out := &in.Secrets, &out.Secrets - *out = make([]ObjectReference, len(*in)) - copy(*out, *in) - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]LocalObjectReference, len(*in)) - copy(*out, *in) - } - if in.AutomountServiceAccountToken != nil { - in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken - if *in == nil { - *out = nil - } else { - *out = new(bool) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccount. -func (in *ServiceAccount) DeepCopy() *ServiceAccount { - if in == nil { - return nil - } - out := new(ServiceAccount) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceAccount) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceAccountList) DeepCopyInto(out *ServiceAccountList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ServiceAccount, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountList. -func (in *ServiceAccountList) DeepCopy() *ServiceAccountList { - if in == nil { - return nil - } - out := new(ServiceAccountList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceAccountList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceList) DeepCopyInto(out *ServiceList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Service, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceList. -func (in *ServiceList) DeepCopy() *ServiceList { - if in == nil { - return nil - } - out := new(ServiceList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServicePort) DeepCopyInto(out *ServicePort) { - *out = *in - out.TargetPort = in.TargetPort - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePort. 
-func (in *ServicePort) DeepCopy() *ServicePort { - if in == nil { - return nil - } - out := new(ServicePort) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceProxyOptions) DeepCopyInto(out *ServiceProxyOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceProxyOptions. -func (in *ServiceProxyOptions) DeepCopy() *ServiceProxyOptions { - if in == nil { - return nil - } - out := new(ServiceProxyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceProxyOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { - *out = *in - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]ServicePort, len(*in)) - copy(*out, *in) - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.ExternalIPs != nil { - in, out := &in.ExternalIPs, &out.ExternalIPs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SessionAffinityConfig != nil { - in, out := &in.SessionAffinityConfig, &out.SessionAffinityConfig - if *in == nil { - *out = nil - } else { - *out = new(SessionAffinityConfig) - (*in).DeepCopyInto(*out) - } - } - if in.LoadBalancerSourceRanges != nil { - in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec. -func (in *ServiceSpec) DeepCopy() *ServiceSpec { - if in == nil { - return nil - } - out := new(ServiceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) { - *out = *in - in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceStatus. -func (in *ServiceStatus) DeepCopy() *ServiceStatus { - if in == nil { - return nil - } - out := new(ServiceStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SessionAffinityConfig) DeepCopyInto(out *SessionAffinityConfig) { - *out = *in - if in.ClientIP != nil { - in, out := &in.ClientIP, &out.ClientIP - if *in == nil { - *out = nil - } else { - *out = new(ClientIPConfig) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionAffinityConfig. -func (in *SessionAffinityConfig) DeepCopy() *SessionAffinityConfig { - if in == nil { - return nil - } - out := new(SessionAffinityConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *StorageOSPersistentVolumeSource) DeepCopyInto(out *StorageOSPersistentVolumeSource) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(ObjectReference) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSPersistentVolumeSource. -func (in *StorageOSPersistentVolumeSource) DeepCopy() *StorageOSPersistentVolumeSource { - if in == nil { - return nil - } - out := new(StorageOSPersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StorageOSVolumeSource) DeepCopyInto(out *StorageOSVolumeSource) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - if *in == nil { - *out = nil - } else { - *out = new(LocalObjectReference) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSVolumeSource. -func (in *StorageOSVolumeSource) DeepCopy() *StorageOSVolumeSource { - if in == nil { - return nil - } - out := new(StorageOSVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Sysctl) DeepCopyInto(out *Sysctl) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sysctl. -func (in *Sysctl) DeepCopy() *Sysctl { - if in == nil { - return nil - } - out := new(Sysctl) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TCPSocketAction) DeepCopyInto(out *TCPSocketAction) { - *out = *in - out.Port = in.Port - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPSocketAction. -func (in *TCPSocketAction) DeepCopy() *TCPSocketAction { - if in == nil { - return nil - } - out := new(TCPSocketAction) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Taint) DeepCopyInto(out *Taint) { - *out = *in - in.TimeAdded.DeepCopyInto(&out.TimeAdded) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Taint. -func (in *Taint) DeepCopy() *Taint { - if in == nil { - return nil - } - out := new(Taint) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Toleration) DeepCopyInto(out *Toleration) { - *out = *in - if in.TolerationSeconds != nil { - in, out := &in.TolerationSeconds, &out.TolerationSeconds - if *in == nil { - *out = nil - } else { - *out = new(int64) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Toleration. -func (in *Toleration) DeepCopy() *Toleration { - if in == nil { - return nil - } - out := new(Toleration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Volume) DeepCopyInto(out *Volume) { - *out = *in - in.VolumeSource.DeepCopyInto(&out.VolumeSource) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. -func (in *Volume) DeepCopy() *Volume { - if in == nil { - return nil - } - out := new(Volume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeMount) DeepCopyInto(out *VolumeMount) { - *out = *in - if in.MountPropagation != nil { - in, out := &in.MountPropagation, &out.MountPropagation - if *in == nil { - *out = nil - } else { - *out = new(MountPropagationMode) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMount. -func (in *VolumeMount) DeepCopy() *VolumeMount { - if in == nil { - return nil - } - out := new(VolumeMount) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) { - *out = *in - if in.Secret != nil { - in, out := &in.Secret, &out.Secret - if *in == nil { - *out = nil - } else { - *out = new(SecretProjection) - (*in).DeepCopyInto(*out) - } - } - if in.DownwardAPI != nil { - in, out := &in.DownwardAPI, &out.DownwardAPI - if *in == nil { - *out = nil - } else { - *out = new(DownwardAPIProjection) - (*in).DeepCopyInto(*out) - } - } - if in.ConfigMap != nil { - in, out := &in.ConfigMap, &out.ConfigMap - if *in == nil { - *out = nil - } else { - *out = new(ConfigMapProjection) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeProjection. -func (in *VolumeProjection) DeepCopy() *VolumeProjection { - if in == nil { - return nil - } - out := new(VolumeProjection) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *VolumeSource) DeepCopyInto(out *VolumeSource) { - *out = *in - if in.HostPath != nil { - in, out := &in.HostPath, &out.HostPath - if *in == nil { - *out = nil - } else { - *out = new(HostPathVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.EmptyDir != nil { - in, out := &in.EmptyDir, &out.EmptyDir - if *in == nil { - *out = nil - } else { - *out = new(EmptyDirVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.GCEPersistentDisk != nil { - in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk - if *in == nil { - *out = nil - } else { - *out = new(GCEPersistentDiskVolumeSource) - **out = **in - } - } - if in.AWSElasticBlockStore != nil { - in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore - if *in == nil { - *out = nil - } else { - *out = new(AWSElasticBlockStoreVolumeSource) - **out = **in - } - } - if in.GitRepo != nil { - in, out := &in.GitRepo, &out.GitRepo - if *in == nil { - *out = nil - } else { - *out = new(GitRepoVolumeSource) - **out = **in - } - } - if in.Secret != nil { - in, out := &in.Secret, &out.Secret - if *in == nil { - *out = nil - } else { - *out = new(SecretVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.NFS != nil { - in, out := &in.NFS, &out.NFS - if *in == nil { - *out = nil - } else { - *out = new(NFSVolumeSource) - **out = **in - } - } - if in.ISCSI != nil { - in, out := &in.ISCSI, &out.ISCSI - if *in == nil { - *out = nil - } else { - *out = new(ISCSIVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.Glusterfs != nil { - in, out := &in.Glusterfs, &out.Glusterfs - if *in == nil { - *out = nil - } else { - *out = new(GlusterfsVolumeSource) - **out = **in - } - } - if in.PersistentVolumeClaim != nil { - in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim - if *in == nil { - *out = nil - } else { - *out = new(PersistentVolumeClaimVolumeSource) - **out = **in - } - } - if in.RBD != nil { - in, out := &in.RBD, &out.RBD - if *in == nil { - *out = nil - } else { - *out = new(RBDVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.Quobyte != nil { - in, out := &in.Quobyte, &out.Quobyte - if *in == nil { - *out = nil - } else { - *out = new(QuobyteVolumeSource) - **out = **in - } - } - if in.FlexVolume != nil { - in, out := &in.FlexVolume, &out.FlexVolume - if *in == nil { - *out = nil - } else { - *out = new(FlexVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.Cinder != nil { - in, out := &in.Cinder, &out.Cinder - if *in == nil { - *out = nil - } else { - *out = new(CinderVolumeSource) - **out = **in - } - } - if in.CephFS != nil { - in, out := &in.CephFS, &out.CephFS - if *in == nil { - *out = nil - } else { - *out = new(CephFSVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.Flocker != nil { - in, out := &in.Flocker, &out.Flocker - if *in == nil { - *out = nil - } else { - *out = new(FlockerVolumeSource) - **out = **in - } - } - if in.DownwardAPI != nil { - in, out := &in.DownwardAPI, &out.DownwardAPI - if *in == nil { - *out = nil - } else { - *out = new(DownwardAPIVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.FC != nil { - in, out := &in.FC, &out.FC - if *in == nil { - *out = nil - } else { - *out = new(FCVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.AzureFile != nil { - in, out := &in.AzureFile, &out.AzureFile - if *in == nil { - *out = nil - } else { - *out = new(AzureFileVolumeSource) - **out = **in - } - } - if in.ConfigMap != nil { - in, out := &in.ConfigMap, &out.ConfigMap - if *in == nil { - *out = nil - } else { - *out = new(ConfigMapVolumeSource) - 
(*in).DeepCopyInto(*out) - } - } - if in.VsphereVolume != nil { - in, out := &in.VsphereVolume, &out.VsphereVolume - if *in == nil { - *out = nil - } else { - *out = new(VsphereVirtualDiskVolumeSource) - **out = **in - } - } - if in.AzureDisk != nil { - in, out := &in.AzureDisk, &out.AzureDisk - if *in == nil { - *out = nil - } else { - *out = new(AzureDiskVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.PhotonPersistentDisk != nil { - in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk - if *in == nil { - *out = nil - } else { - *out = new(PhotonPersistentDiskVolumeSource) - **out = **in - } - } - if in.Projected != nil { - in, out := &in.Projected, &out.Projected - if *in == nil { - *out = nil - } else { - *out = new(ProjectedVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.PortworxVolume != nil { - in, out := &in.PortworxVolume, &out.PortworxVolume - if *in == nil { - *out = nil - } else { - *out = new(PortworxVolumeSource) - **out = **in - } - } - if in.ScaleIO != nil { - in, out := &in.ScaleIO, &out.ScaleIO - if *in == nil { - *out = nil - } else { - *out = new(ScaleIOVolumeSource) - (*in).DeepCopyInto(*out) - } - } - if in.StorageOS != nil { - in, out := &in.StorageOS, &out.StorageOS - if *in == nil { - *out = nil - } else { - *out = new(StorageOSVolumeSource) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSource. -func (in *VolumeSource) DeepCopy() *VolumeSource { - if in == nil { - return nil - } - out := new(VolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VsphereVirtualDiskVolumeSource) DeepCopyInto(out *VsphereVirtualDiskVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VsphereVirtualDiskVolumeSource. -func (in *VsphereVirtualDiskVolumeSource) DeepCopy() *VsphereVirtualDiskVolumeSource { - if in == nil { - return nil - } - out := new(VsphereVirtualDiskVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WeightedPodAffinityTerm) DeepCopyInto(out *WeightedPodAffinityTerm) { - *out = *in - in.PodAffinityTerm.DeepCopyInto(&out.PodAffinityTerm) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeightedPodAffinityTerm. -func (in *WeightedPodAffinityTerm) DeepCopy() *WeightedPodAffinityTerm { - if in == nil { - return nil - } - out := new(WeightedPodAffinityTerm) - in.DeepCopyInto(out) - return out -}